Committing vendored dependencies and generated protos

Change-Id: I349c149b513d9de7d9f60bde2c954a939da2fc54
diff --git a/vendor/github.com/aead/cmac/.gitignore b/vendor/github.com/aead/cmac/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/aead/cmac/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/aead/cmac/LICENSE b/vendor/github.com/aead/cmac/LICENSE
new file mode 100644
index 0000000..b6a9210
--- /dev/null
+++ b/vendor/github.com/aead/cmac/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Andreas Auernhammer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/aead/cmac/README.md b/vendor/github.com/aead/cmac/README.md
new file mode 100644
index 0000000..5c1dda4
--- /dev/null
+++ b/vendor/github.com/aead/cmac/README.md
@@ -0,0 +1,12 @@
+[![Godoc Reference](https://godoc.org/github.com/aead/cmac?status.svg)](https://godoc.org/github.com/aead/cmac)
+
+## The CMAC/OMAC1 message authentication code
+
+The CMAC message authentication code is specified (with AES) in [RFC 4493](https://tools.ietf.org/html/rfc4493 "RFC 4493")
+and [RFC 4494](https://tools.ietf.org/html/rfc4494 "RFC 4494").  
+CMAC is only specified in combination with AES.  
+
+This implementation supports block ciphers with a block size of 64, 128, 256, 512, or 1024 bits.
+
+### Installation
+Install in your GOPATH: `go get -u github.com/aead/cmac`  
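+
+### Example
+A minimal usage sketch of the `aes` sub-package (the all-zero key and short message are placeholders for illustration only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aead/cmac/aes"
+)
+
+func main() {
+	key := make([]byte, 16) // AES-128 key (all zeros, illustration only)
+	msg := []byte("hello world")
+
+	// Compute a full-length (16 byte) AES-CMAC tag.
+	tag, err := aes.Sum(msg, key, 16)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("tag: %x\n", tag)
+
+	// Verify the tag in constant time.
+	fmt.Println("valid:", aes.Verify(tag, msg, key, 16))
+}
+```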
diff --git a/vendor/github.com/aead/cmac/aes/aes-cmac.go b/vendor/github.com/aead/cmac/aes/aes-cmac.go
new file mode 100644
index 0000000..5ca3c6c
--- /dev/null
+++ b/vendor/github.com/aead/cmac/aes/aes-cmac.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package aes implements the CMAC MAC with AES.
+// AES-CMAC is specified in RFC 4493 and RFC 4494.
+package aes // import "github.com/aead/cmac/aes"
+
+import (
+	aesCipher "crypto/aes"
+	"hash"
+
+	"github.com/aead/cmac"
+)
+
+// Sum computes the AES-CMAC checksum with the given tagsize of msg using the given key.
+func Sum(msg, key []byte, tagsize int) ([]byte, error) {
+	c, err := aesCipher.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	return cmac.Sum(msg, c, tagsize)
+}
+
+// Verify computes the AES-CMAC checksum with the given tagsize of msg and compares
+// it with the given mac. This function returns true if and only if the given mac
+// is equal to the computed one.
+func Verify(mac, msg, key []byte, tagsize int) bool {
+	c, err := aesCipher.NewCipher(key)
+	if err != nil {
+		return false
+	}
+	return cmac.Verify(mac, msg, c, tagsize)
+}
+
+// New returns a hash.Hash computing the AES-CMAC checksum.
+func New(key []byte) (hash.Hash, error) {
+	return NewWithTagSize(key, aesCipher.BlockSize)
+}
+
+// NewWithTagSize returns a hash.Hash computing the AES-CMAC checksum with the
+// given tag size. The tag size must be between 1 and the cipher's block size.
+func NewWithTagSize(key []byte, tagsize int) (hash.Hash, error) {
+	c, err := aesCipher.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	return cmac.NewWithTagSize(c, tagsize)
+}
diff --git a/vendor/github.com/aead/cmac/cmac.go b/vendor/github.com/aead/cmac/cmac.go
new file mode 100644
index 0000000..1e90314
--- /dev/null
+++ b/vendor/github.com/aead/cmac/cmac.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package cmac implements the fast CMAC MAC based on
+// a block cipher. This mode of operation fixes security
+// deficiencies of CBC-MAC (CBC-MAC is secure only for
+// fixed-length messages). CMAC is equal to OMAC1.
+// This implementation supports block ciphers with a
+// block size of:
+//	-   64 bit
+//	-  128 bit
+//	-  256 bit
+//	-  512 bit
+//	- 1024 bit
+// Common ciphers like AES, Serpent etc. operate on 128 bit
+// blocks. 256, 512 and 1024 are supported for the Threefish
+// tweakable block cipher. Ciphers with 64 bit blocks are
+// supported, but not recommended.
+// CMAC (with AES) is specified in RFC 4493 and RFC 4494.
+package cmac // import "github.com/aead/cmac"
+
+import (
+	"crypto/cipher"
+	"crypto/subtle"
+	"errors"
+	"hash"
+)
+
+const (
+	// minimal irreducible polynomial for blocksize
+	p64   = 0x1b    // for 64  bit block ciphers
+	p128  = 0x87    // for 128 bit block ciphers (like AES)
+	p256  = 0x425   // special for large block ciphers (Threefish)
+	p512  = 0x125   // special for large block ciphers (Threefish)
+	p1024 = 0x80043 // special for large block ciphers (Threefish)
+)
+
+var (
+	errUnsupportedCipher = errors.New("cipher block size not supported")
+	errInvalidTagSize    = errors.New("tag size must be between 1 and the cipher's block size")
+)
+
+// Sum computes the CMAC checksum with the given tagsize of msg using the cipher.Block.
+func Sum(msg []byte, c cipher.Block, tagsize int) ([]byte, error) {
+	h, err := NewWithTagSize(c, tagsize)
+	if err != nil {
+		return nil, err
+	}
+	h.Write(msg)
+	return h.Sum(nil), nil
+}
+
+// Verify computes the CMAC checksum with the given tagsize of msg and compares
+// it with the given mac. This function returns true if and only if the given mac
+// is equal to the computed one.
+func Verify(mac, msg []byte, c cipher.Block, tagsize int) bool {
+	sum, err := Sum(msg, c, tagsize)
+	if err != nil {
+		return false
+	}
+	return subtle.ConstantTimeCompare(mac, sum) == 1
+}
+
+// New returns a hash.Hash computing the CMAC checksum.
+func New(c cipher.Block) (hash.Hash, error) {
+	return NewWithTagSize(c, c.BlockSize())
+}
+
+// NewWithTagSize returns a hash.Hash computing the CMAC checksum with the
+// given tag size. The tag size must be between 1 and the cipher's block size.
+func NewWithTagSize(c cipher.Block, tagsize int) (hash.Hash, error) {
+	blocksize := c.BlockSize()
+
+	if tagsize <= 0 || tagsize > blocksize {
+		return nil, errInvalidTagSize
+	}
+
+	var p int
+	switch blocksize {
+	default:
+		return nil, errUnsupportedCipher
+	case 8:
+		p = p64
+	case 16:
+		p = p128
+	case 32:
+		p = p256
+	case 64:
+		p = p512
+	case 128:
+		p = p1024
+	}
+
+	m := &macFunc{
+		cipher: c,
+		k0:     make([]byte, blocksize),
+		k1:     make([]byte, blocksize),
+		buf:    make([]byte, blocksize),
+	}
+	m.tagsize = tagsize
+	c.Encrypt(m.k0, m.k0)
+
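+	// Derive the two CMAC subkeys (RFC 4493): starting from L = E_K(0^n) computed above,
+	// left-shift by one bit and XOR in the block-size-specific polynomial whenever the
+	// bit shifted out was 1, using a constant-time select.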
+	v := shift(m.k0, m.k0)
+	m.k0[blocksize-1] ^= byte(subtle.ConstantTimeSelect(v, p, 0))
+
+	v = shift(m.k1, m.k0)
+	m.k1[blocksize-1] ^= byte(subtle.ConstantTimeSelect(v, p, 0))
+
+	return m, nil
+}
+
+// The CMAC message auth. function
+type macFunc struct {
+	cipher  cipher.Block
+	k0, k1  []byte
+	buf     []byte
+	off     int
+	tagsize int
+}
+
+func (h *macFunc) Size() int { return h.cipher.BlockSize() }
+
+func (h *macFunc) BlockSize() int { return h.cipher.BlockSize() }
+
+func (h *macFunc) Reset() {
+	for i := range h.buf {
+		h.buf[i] = 0
+	}
+	h.off = 0
+}
+
+func (h *macFunc) Write(msg []byte) (int, error) {
+	bs := h.BlockSize()
+	n := len(msg)
+
+	if h.off > 0 {
+		dif := bs - h.off
+		if n > dif {
+			xor(h.buf[h.off:], msg[:dif])
+			msg = msg[dif:]
+			h.cipher.Encrypt(h.buf, h.buf)
+			h.off = 0
+		} else {
+			xor(h.buf[h.off:], msg)
+			h.off += n
+			return n, nil
+		}
+	}
+
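+	// Encrypt every complete block except the last one; the final (full or partial)
+	// block stays buffered so Sum can apply the correct subkey and padding.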
+	if length := len(msg); length > bs {
+		nn := length & (^(bs - 1))
+		if length == nn {
+			nn -= bs
+		}
+		for i := 0; i < nn; i += bs {
+			xor(h.buf, msg[i:i+bs])
+			h.cipher.Encrypt(h.buf, h.buf)
+		}
+		msg = msg[nn:]
+	}
+
+	if length := len(msg); length > 0 {
+		xor(h.buf[h.off:], msg)
+		h.off += length
+	}
+
+	return n, nil
+}
+
+func (h *macFunc) Sum(b []byte) []byte {
+	blocksize := h.cipher.BlockSize()
+
+	// Don't change the buffer so the
+	// caller can keep writing and summing.
+	hash := make([]byte, blocksize)
+
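+	// RFC 4493 finalization: a complete final block is masked with the first subkey (k0),
+	// while a partial final block is masked with the second subkey (k1) and padded with
+	// a single 1 bit (0x80) followed by zeros.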
+	if h.off < blocksize {
+		copy(hash, h.k1)
+	} else {
+		copy(hash, h.k0)
+	}
+
+	xor(hash, h.buf)
+	if h.off < blocksize {
+		hash[h.off] ^= 0x80
+	}
+
+	h.cipher.Encrypt(hash, hash)
+	return append(b, hash[:h.tagsize]...)
+}
+
+func shift(dst, src []byte) int {
+	var b, bit byte
+	for i := len(src) - 1; i >= 0; i-- { // a range would be nice
+		bit = src[i] >> 7
+		dst[i] = src[i]<<1 | b
+		b = bit
+	}
+	return int(b)
+}
diff --git a/vendor/github.com/aead/cmac/xor.go b/vendor/github.com/aead/cmac/xor.go
new file mode 100644
index 0000000..ecc3862
--- /dev/null
+++ b/vendor/github.com/aead/cmac/xor.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// +build !amd64
+
+package cmac
+
+// xor xors the bytes in dst with src and writes the result to dst.
+// The destination is assumed to have enough space.
+func xor(dst, src []byte) {
+	for i, v := range src {
+		dst[i] ^= v
+	}
+}
diff --git a/vendor/github.com/aead/cmac/xor_amd64.go b/vendor/github.com/aead/cmac/xor_amd64.go
new file mode 100644
index 0000000..3326a89
--- /dev/null
+++ b/vendor/github.com/aead/cmac/xor_amd64.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package cmac
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+// xor xors the bytes in dst with src and writes the result to dst.
+// The destination is assumed to have enough space.
+func xor(dst, src []byte) {
+	n := len(src)
+
+	w := n / wordSize
+	if w > 0 {
+		dstPtr := *(*[]uintptr)(unsafe.Pointer(&dst))
+		srcPtr := *(*[]uintptr)(unsafe.Pointer(&src))
+		for i, v := range srcPtr[:w] {
+			dstPtr[i] ^= v
+		}
+	}
+
+	for i := (n & (^(wordSize - 1))); i < n; i++ {
+		dst[i] ^= src[i]
+	}
+}
diff --git a/vendor/github.com/cboling/omci/.gitignore b/vendor/github.com/cboling/omci/.gitignore
new file mode 100644
index 0000000..b5c16bf
--- /dev/null
+++ b/vendor/github.com/cboling/omci/.gitignore
@@ -0,0 +1,92 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# PyCharm / Goland
+.idea/
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn.  Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+/coverage.html
+/vendor/
diff --git a/vendor/github.com/cboling/omci/Gopkg.lock b/vendor/github.com/cboling/omci/Gopkg.lock
new file mode 100644
index 0000000..0f63cbc
--- /dev/null
+++ b/vendor/github.com/cboling/omci/Gopkg.lock
@@ -0,0 +1,69 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  branch = "master"
+  digest = "1:5671867a7f78a839bce5a3a8d4513bdbec7f799ab9c919a2fb50fe6c167cd61e"
+  name = "github.com/aead/cmac"
+  packages = [
+    ".",
+    "aes",
+  ]
+  pruneopts = "UT"
+  revision = "7af84192f0b1d66a78841ebd3e0d2f5212a4a928"
+
+[[projects]]
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  pruneopts = "UT"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
+[[projects]]
+  digest = "1:e47d51dab652d26c3fba6f8cba403f922d02757a82abdc77e90df7948daf296e"
+  name = "github.com/deckarep/golang-set"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e"
+  version = "v1.7.1"
+
+[[projects]]
+  branch = "master"
+  digest = "1:e1bdf8bd56b2c7775b12f17219b2e6b1d45a39cf20e42926f9069b1755639918"
+  name = "github.com/google/gopacket"
+  packages = [
+    ".",
+    "layers",
+  ]
+  pruneopts = "UT"
+  revision = "66eed7fc5258c34b3ead3c5964ea538025686a68"
+
+[[projects]]
+  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  pruneopts = "UT"
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:972c2427413d41a1e06ca4897e8528e5a1622894050e2f527b38ddf0f343f759"
+  name = "github.com/stretchr/testify"
+  packages = ["assert"]
+  pruneopts = "UT"
+  revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
+  version = "v1.3.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  input-imports = [
+    "github.com/aead/cmac/aes",
+    "github.com/deckarep/golang-set",
+    "github.com/google/gopacket",
+    "github.com/google/gopacket/layers",
+    "github.com/stretchr/testify/assert",
+  ]
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/cboling/omci/Gopkg.toml b/vendor/github.com/cboling/omci/Gopkg.toml
new file mode 100644
index 0000000..3a8d1da
--- /dev/null
+++ b/vendor/github.com/cboling/omci/Gopkg.toml
@@ -0,0 +1,17 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+
+
+[[constraint]]
+  name = "github.com/google/gopacket"
+  branch = "master"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "1.3.0"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/cboling/omci/LICENSE b/vendor/github.com/cboling/omci/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/cboling/omci/README.md b/vendor/github.com/cboling/omci/README.md
new file mode 100644
index 0000000..4a29ef4
--- /dev/null
+++ b/vendor/github.com/cboling/omci/README.md
@@ -0,0 +1,98 @@
+# OMCI
+
+This OMCI gopacket library supports the encoding and decoding of ITU-T G.988 OMCI
+messages. It is currently a work in progress.
+
+## Message Types supported and under unit test
+The following OMCI message types have been coded and are covered
+satisfactorily by unit tests.
+
+ - CreateRequest
+ - CreateResponse
+ - DeleteRequest
+ - DeleteResponse
+ - SetRequest
+ - GetRequest
+ - GetAllAlarmsRequest
+ - GetAllAlarmsResponse
+ - GetAllAlarmsNextRequest
+ - MibUploadRequest
+ - MibUploadResponse
+ - MibUploadNextRequest
+ - MibResetRequest
+ - MibResetResponse
+ - SynchronizeTimeRequest
+
+## Message Types supported but lacking full unit test
+The following OMCI message types have been coded and are partially covered
+by unit tests, but work remains to reach sufficient coverage.
+
+ - SetResponse
+ - GetResponse
+ - GetAllAlarmsNextResponse
+ - MibUploadNextResponse
+ - SynchronizeTimeResponse
+ - AttributeValueChange
+ - RebootRequest
+ - RebootResponse
+ - StartSoftwareDownloadRequest
+ - GetNextRequest
+ - GetNextResponse
+
+## Message Types supported but lacking any unit test
+The following OMCI message types have been coded but do not
+yet have any unit test coverage.
+
+ - StartSoftwareDownloadResponse
+ - DownloadSectionRequest
+ - DownloadSectionResponse
+ - EndSoftwareDownloadRequest
+ - EndSoftwareDownloadResponse
+ - ActivateSoftwareRequest
+ - ActivateSoftwareResponse
+ - CommitSoftwareRequest
+ - CommitSoftwareResponse
+ - GetCurrentDataRequest
+ - GetCurrentDataResponse
+ - AlarmNotification
+ 
+## Message Types not yet supported
+
+The following OMCI message types have not yet been coded.
+
+ - TestResult
+ - TestRequest
+ - TestResponse
+ - SetTableRequest
+ - SetTableResponse
+
+## Current unit-test coverage
+
+
+## Other outstanding items
+
+Besides OMCI message decode/serialization and the associated unit tests, the following items
+would be needed or useful for a first official release of this library. Some changes also
+need to be made in the generated OMCI ME code.
+
+ - Specific examples of how to use this library (expand upon the DecodeEncode.go examples),
+   including unknown-ME examples and how to catch various common or expected errors
+ - Add Alarm Table Support (generated MEs also)
+ - Add AVC flag for appropriate attributes
+ - For serialization, check early for message size exceeded
+ - Check proper gopacket use of Payload/Contents properties and make sure we
+   follow guidelines (if there are any)
+ - For 'mebase.go' string output, look up ME name and output as needed
+ - Look through 'error' messages and see if there are a few very common ones that
+   could be moved to a custom class to allow for better user interception/decode of
+   these errors.
+ 
+The following would be 'nice' to have but are not necessary for an initial code release:
+ - Extended message support
+ - MIC Encode/Decode support
+
+## Create requests
+Currently the OMCI parser does not decode the default Set-By-Create attribute values
+from the ITU document, so for attributes whose default is not zero you must specify
+the values explicitly if you use the 'meframe.go' routine to create a CreateRequest
+for a specific Managed Entity instance, as sketched below.
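+
+As a rough illustration only, here is a sketch of building an ME instance with explicit
+attribute values. The `EntityID`/`Attributes` fields of `ParamData` and the string-keyed
+`AttributeValueMap` are assumptions that should be verified against the library's `me.go`;
+the attribute names come from the generated `aal5profile.go`.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cboling/omci/generated"
+)
+
+func main() {
+	// Assumed ParamData layout: an instance ID plus initial attribute values.
+	params := generated.ParamData{
+		EntityID: 1,
+		Attributes: generated.AttributeValueMap{
+			// Non-zero Set-By-Create defaults must be supplied explicitly.
+			"MaxCpcsPduSize": 1516,
+			"AalMode":        0,
+			"SscsType":       0,
+		},
+	}
+	profile, omciErr := generated.NewAal5Profile(params)
+	// Error inspection on OmciErrors is library-specific; printed here for brevity.
+	fmt.Println(profile, omciErr)
+}
+```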
diff --git a/vendor/github.com/cboling/omci/coverage.cmd b/vendor/github.com/cboling/omci/coverage.cmd
new file mode 100644
index 0000000..3344ed8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/coverage.cmd
@@ -0,0 +1,24 @@
+@echo off
+rem    Coverage report for windows builds
+rem
+rem First general build
+echo Building the source
+go build
+if ERRORLEVEL 1 GOTO buildFailed
+
+echo Starting unit test coverage
+go test . examples/... generated/... -coverprofile=cp.out
+
+rem Output HTML coverage report (to coverage.html)
+echo Creating HTML coverage report (coverage.html)
+go tool cover -html=cp.out
+
+rem Now show in default browser
+echo Launching browser with results
+rem start coverage.html
+
+@echo Done
+exit /B 0
+
+:buildFailed
+@echo Build failed
\ No newline at end of file
diff --git a/vendor/github.com/cboling/omci/coverage.sh b/vendor/github.com/cboling/omci/coverage.sh
new file mode 100644
index 0000000..26bfe5c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/coverage.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+#
+#  Coverage report for Linux builds
+#
+#First general build
+echo "Building the source"
+go build || exit $?
+
+echo "Starting unit test coverage"
+go test . examples/... generated/... -coverprofile=cp.out
+
+# Output HTML coverage report (to coverage.html)
+echo "Creating HTML coverage report (coverage.html)"
+go tool cover -html=cp.out
+
+# Now show in default browser
+echo "Launching browser with results"
diff --git a/vendor/github.com/cboling/omci/generated/aal5performancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/aal5performancemonitoringhistorydata.go
new file mode 100644
index 0000000..d2dd2e2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/aal5performancemonitoringhistorydata.go
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Aal5PerformanceMonitoringHistoryDataClassId ClassID = ClassID(18)
+
+var aal5performancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// Aal5PerformanceMonitoringHistoryData (class ID #18)
+//	This ME collects PM data as a result of performing segmentation and reassembly (SAR) and
+//	convergence sublayer (CS) level protocol monitoring. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of an IW VCC TP that represents AAL5
+//		functions.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the IW VCC TP. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Sum Of Invalid Cs Field Errors
+//			Sum of invalid CS field errors: This attribute counts the sum of invalid CS field errors. For
+//			AAL type 5, this attribute is a single count of the number of CS PDUs discarded due to one of
+//			the following error conditions: invalid common part indicator (CPI), oversized received SDU, or
+//			length violation. (R) (mandatory) (4 bytes)
+//
+//		Crc Violations
+//			CRC violations: This attribute counts CRC violations detected on incoming SAR PDUs. (R)
+//			(mandatory) (4 bytes)
+//
+//		Reassembly Timer Expirations
+//			Reassembly timer expirations: This attribute counts reassembly timer expirations. (R) (mandatory
+//			if reassembly timer is implemented) (4 bytes)
+//
+//		Buffer Overflows
+//			Buffer overflows: This attribute counts the number of times where there was not enough buffer
+//			space for a reassembled packet. (R) (mandatory) (4 bytes)
+//
+//		Encap Protocol Errors
+//			Encap protocol errors: This attribute counts the number of times that [IETF RFC 2684]
+//			encapsulation protocol detected a bad header. (R) (mandatory) (4 bytes)
+//
+type Aal5PerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	aal5performancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "Aal5PerformanceMonitoringHistoryData",
+		ClassID: 18,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("SumOfInvalidCsFieldErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("CrcViolations", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("ReassemblyTimerExpirations", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("BufferOverflows", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("EncapProtocolErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewAal5PerformanceMonitoringHistoryData (class ID 18) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewAal5PerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(aal5performancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/aal5profile.go b/vendor/github.com/cboling/omci/generated/aal5profile.go
new file mode 100644
index 0000000..af7db41
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/aal5profile.go
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Aal5ProfileClassId ClassID = ClassID(16)
+
+var aal5profileBME *ManagedEntityDefinition
+
+// Aal5Profile (class ID #16)
+//	This ME organizes data that describe the AAL type 5 processing functions of the ONU. It is used
+//	with the IW VCC TP ME.
+//
+//	This ME is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the IW VCC TP.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Max Cpcs Pdu Size
+//			Max CPCS PDU size: This attribute specifies the maximum CPCS PDU size to be transmitted over the
+//			connection in both upstream and downstream directions. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Aal Mode
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Sscs Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type Aal5Profile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	aal5profileBME = &ManagedEntityDefinition{
+		Name:    "Aal5Profile",
+		ClassID: 16,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("MaxCpcsPduSize", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("AalMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("SscsType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewAal5Profile (class ID 16) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewAal5Profile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(aal5profileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ani-g.go b/vendor/github.com/cboling/omci/generated/ani-g.go
new file mode 100644
index 0000000..f86fc9b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ani-g.go
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const AniGClassId ClassID = ClassID(263)
+
+var anigBME *ManagedEntityDefinition
+
+// AniG (class ID #263)
+//	This ME organizes data associated with each access network interface supported by a GPON ONU.
+//	The ONU automatically creates one instance of this ME for each PON physical port.
+//
+//	Relationships
+//		An instance of this ME is associated with each instance of a physical PON interface.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Its value
+//			indicates the physical position of the PON interface. The first byte is the slot ID, defined in
+//			clause 9.1.5. The second byte is the port ID. (R) (mandatory) (2 bytes)
+//
+//		Sr Indication
+//			SR indication: This Boolean attribute indicates the ONU's capability to report queue status for
+//			DBA. The value true means that status reporting is available for all TCONTs that are associated
+//			with the ANI. (R) (mandatory) (1 byte)
+//
+//		Total TCont Number
+//			Total TCONT number: This attribute indicates the total number of T-CONTs that can be supported
+//			on this ANI. (R) (mandatory) (2 bytes)
+//
+//		Gem Block Length
+//			In all other ITU-T PON systems, the unit for queue occupancy reporting is fixed at 4 bytes by
+//			the respective TC layer specification.
+//
+//		Piggyback Dba Reporting
+//			(R) (mandatory) (1 byte)
+//
+//		Deprecated
+//			Deprecated:	This attribute should be set to 0 by the ONU and ignored by the OLT. (R) (mandatory)
+//			(1 byte)
+//
+//		Signal Fail Threshold
+//			Signal fail (SF) threshold: This attribute specifies the downstream bit error rate (BER)
+//			threshold to detect the SF alarm. When this value is y, the BER threshold is 10–y. Valid values
+//			threshold to detect the SF alarm. When this value is y, the BER threshold is 10^-y. Valid values
+//
+//		Signal Degrade Sd Threshold
+//			Signal degrade (SD) threshold: This attribute specifies the downstream BER threshold to detect
+//			the SD alarm. When this value is x, the BER threshold for SD is 10^-x. Valid values are 4..10.
+//			The SD threshold must be lower than the SF threshold; i.e., x > y. Upon ME instantiation, the
+//			ONU sets this attribute to 9. (R, W) (mandatory) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Optical Signal Level
+//			Optical signal level: This attribute reports the current measurement of the total downstream
+//			optical signal level. Its value is a 2s complement integer referred to 1  mW (i.e., 1 dBm), with
+//			0.002 dB granularity. (R) (optional) (2 bytes)
+//
+//		Lower Optical Threshold
+//			Lower optical threshold: This attribute specifies the optical level the ONU uses to declare the
+//			downstream low received optical power alarm. Valid values are –127 dBm (coded as 254) to 0 dBm
+//			(coded as 0) in 0.5 dB increments. The default value 0xFF selects the ONU's internal policy.
+//			(R, W) (optional) (1 byte)
+//
+//		Upper Optical Threshold
+//			Upper optical threshold: This attribute specifies the optical level the ONU uses to declare the
+//			downstream high received optical power alarm. Valid values are –127 dBm (coded as 254) to 0 dBm
+//			(coded as 0) in 0.5 dB increments. The default value 0xFF selects the ONU's internal policy.
+//			(R, W) (optional) (1 byte)
+//
+//		Onu Response Time
+//			(R) (optional) (2 bytes)
+//
+//		Transmit Optical Level
+//			Transmit optical level: This attribute reports the current measurement of mean optical launch
+//			power. Its value is a 2s complement integer referred to 1 mW (i.e., 1 dBm), with 0.002 dB
+//			granularity. (R) (optional) (2 bytes)
+//
+//		Lower Transmit Power Threshold
+//			Lower transmit power threshold: This attribute specifies the minimum mean optical launch power
+//			that the ONU uses to declare the low transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value –63.5 (0x81)
+//			selects the ONU's internal policy. (R, W) (optional) (1 byte)
+//
+//		Upper Transmit Power Threshold
+//			Upper transmit power threshold: This attribute specifies the maximum mean optical launch power
+//			that the ONU uses to declare the high transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value –63.5 (0x81)
+//			selects the ONU's internal policy. (R, W) (optional) (1 byte)
+//
+type AniG struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	anigBME = &ManagedEntityDefinition{
+		Name:    "AniG",
+		ClassID: 263,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+			Test,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("SrIndication", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("TotalTcontNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint16Field("GemBlockLength", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  ByteField("PiggybackDbaReporting", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  ByteField("Deprecated", 0, mapset.NewSetWith(Read), false, false, false, true, 5),
+			6:  ByteField("SignalFailThreshold", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("SignalDegradeSdThreshold", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 8),
+			9:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: Uint16Field("OpticalSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: ByteField("LowerOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("UpperOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("OnuResponseTime", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint16Field("TransmitOpticalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 14),
+			15: ByteField("LowerTransmitPowerThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+			16: ByteField("UpperTransmitPowerThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewAniG (class ID 263) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewAniG(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(anigBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/attribute.go b/vendor/github.com/cboling/omci/generated/attribute.go
new file mode 100644
index 0000000..1246c56
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/attribute.go
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/deckarep/golang-set"
+	"github.com/google/gopacket"
+	"sort"
+	"strings"
+)
+
+type AttributeDefinitionMap map[uint]*AttributeDefinition
+
+// AttributeDefinition defines a single specific Managed Entity's attributes
+type AttributeDefinition struct {
+	Name         string
+	Index        uint
+	DefValue     interface{} // Note: Not supported yet
+	Size         int
+	Access       mapset.Set // AttributeAccess...
+	Constraint   func(interface{}) *ParamError
+	Avc          bool // If true, an AVC notification can occur for the attribute
+	Tca          bool // If true, a threshold crossing alert alarm notification can occur for the attribute
+	Counter      bool // If true, this attribute is a PM counter
+	Optional     bool // If true, attribute is optional, else mandatory
+	TableSupport bool // If true, attribute is a table
+	Deprecated   bool // If true, this attribute is deprecated and only 'read' operations (if any) are performed
+}
+
+func (attr *AttributeDefinition) String() string {
+	return fmt.Sprintf("AttributeDefinition: %v (%v): Size: %v, Default: %v, Access: %v",
+		attr.GetName(), attr.GetIndex(), attr.GetSize(), attr.GetDefault(), attr.GetAccess())
+}
+func (attr *AttributeDefinition) GetName() string         { return attr.Name }
+func (attr *AttributeDefinition) GetIndex() uint          { return attr.Index }
+func (attr *AttributeDefinition) GetDefault() interface{} { return attr.DefValue }
+func (attr *AttributeDefinition) GetSize() int            { return attr.Size }
+func (attr *AttributeDefinition) GetAccess() mapset.Set   { return attr.Access }
+func (attr *AttributeDefinition) GetConstraints() func(interface{}) *ParamError {
+	return attr.Constraint
+}
+func (attr *AttributeDefinition) IsTableAttribute() bool {
+	return attr.TableSupport
+}
+
+func (attr *AttributeDefinition) Decode(data []byte, df gopacket.DecodeFeedback, msgType byte) (interface{}, error) {
+	if attr.IsTableAttribute() {
+		value, err := attr.tableAttributeDecode(data, df, msgType)
+		if err != nil {
+			return nil, err
+		}
+		if attr.GetConstraints() != nil {
+			if omciErr := attr.GetConstraints()(value); omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	}
+	size := attr.GetSize()
+
+	if len(data) < size {
+		df.SetTruncated()
+		return nil, NewMessageTruncatedError("packet too small for field")
+	}
+	switch attr.GetSize() {
+	default:
+		value := make([]byte, size)
+		copy(value, data[:size])
+		if attr.GetConstraints() != nil {
+			if omciErr := attr.GetConstraints()(value); omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	case 1:
+		value := data[0]
+		if attr.GetConstraints() != nil {
+			if omciErr := attr.GetConstraints()(value); omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	case 2:
+		value := binary.BigEndian.Uint16(data[0:2])
+		if attr.GetConstraints() != nil {
+			if omciErr := attr.GetConstraints()(value); omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	case 4:
+		value := binary.BigEndian.Uint32(data[0:4])
+		if attr.GetConstraints() != nil {
+			if omciErr := attr.GetConstraints()(value); omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	case 8:
+		value := binary.BigEndian.Uint64(data[0:8])
+		if attr.GetConstraints() != nil {
+			omciErr := attr.GetConstraints()(value)
+			if omciErr != nil {
+				return nil, omciErr.GetError()
+			}
+		}
+		return value, nil
+	}
+}
+
+func (attr *AttributeDefinition) SerializeTo(value interface{}, b gopacket.SerializeBuffer,
+	msgType byte, bytesAvailable int) (int, error) {
+	if attr.IsTableAttribute() {
+		return attr.tableAttributeSerializeTo(value, b, msgType, bytesAvailable)
+	}
+	size := attr.GetSize()
+	if bytesAvailable < size {
+		return 0, NewMessageTruncatedError(fmt.Sprintf("not enough space for attribute: %v", attr.Name))
+	}
+	bytes, err := b.AppendBytes(size)
+	if err != nil {
+		return 0, err
+	}
+	switch size {
+	default:
+		copy(bytes, value.([]byte))
+	case 1:
+		switch value.(type) {
+		case int:
+			bytes[0] = byte(value.(int))
+		default:
+			bytes[0] = value.(byte)
+		}
+	case 2:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint16(bytes, uint16(value.(int)))
+		default:
+			binary.BigEndian.PutUint16(bytes, value.(uint16))
+		}
+	case 4:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint32(bytes, uint32(value.(int)))
+		default:
+			binary.BigEndian.PutUint32(bytes, value.(uint32))
+		}
+	case 8:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint64(bytes, uint64(value.(int)))
+		default:
+			binary.BigEndian.PutUint64(bytes, value.(uint64))
+		}
+	}
+	return size, nil
+}
+
+// BufferToTableAttributes takes the reconstructed octet buffer transmitted for
+// a table attribute (over many GetNextResponses) and converts it into the desired
+// format for each table row
+func (attr *AttributeDefinition) BufferToTableAttributes(data []byte) (interface{}, error) {
+	// Source is network byte order octets. Convert to proper array of slices
+	rowSize := attr.GetSize()
+	dataSize := len(data)
+	index := 0
+
+	switch rowSize {
+	default:
+		value := make([][]byte, dataSize/rowSize)
+		for offset := 0; offset < dataSize; offset += rowSize {
+			value[index] = make([]byte, rowSize)
+			copy(value[index], data[offset:])
+			index++
+		}
+		return value, nil
+	case 1:
+		value := make([]byte, dataSize)
+		copy(value, data)
+		return value, nil
+	case 2:
+		value := make([]uint16, dataSize/2)
+		for offset := 0; offset < dataSize; offset += rowSize {
+			value[index] = binary.BigEndian.Uint16(data[offset:])
+			index++
+		}
+		return value, nil
+	case 4:
+		value := make([]uint32, dataSize/4)
+		for offset := 0; offset < dataSize; offset += rowSize {
+			value[index] = binary.BigEndian.Uint32(data[offset:])
+			index++
+		}
+		return value, nil
+	case 8:
+		value := make([]uint64, dataSize/8)
+		for offset := 0; offset < dataSize; offset += rowSize {
+			value[index] = binary.BigEndian.Uint64(data[offset:])
+			index++
+		}
+		return value, nil
+	}
+}
+
+func (attr *AttributeDefinition) tableAttributeDecode(data []byte, df gopacket.DecodeFeedback, msgType byte) (interface{}, error) {
+	// Decoding of a table attribute depends on the type of message. A
+	// review of ITU-T G.988 shows that access to tables is either
+	// Read and/or Write, never Set-by-Create
+	switch msgType {
+	default:
+		return nil, fmt.Errorf("unsupported Message Type '%v' for table decode", msgType)
+
+	case byte(Get) | AK: // Get Response
+		// Size
+		value := binary.BigEndian.Uint32(data[0:4])
+		return value, nil
+
+	case byte(GetNext) | AK: // Get Next Response
+		// Block of data (octets) that needs to be reassembled before conversion
+		// to table/row-data.  If the table attribute is not explicitly given a value,
+		// we have to assume the entire data buffer is the value. The receiver of
+		// this frame will need to trim off any additional information at the end
+		// of the last frame sequence since they (and the ONU) are the only ones
+		// who know how long the data really is.
+		size := attr.GetSize()
+		if size != 0 && len(data) < size {
+			df.SetTruncated()
+			return nil, NewMessageTruncatedError("packet too small for field")
+		} else if size == 0 {
+			return nil, NewProcessingError("table attributes with no size are not supported: %v", attr.Name)
+		}
+		return data, nil
+
+	case byte(Set) | AR: // Set Request
+		// TODO: Decoding a table attribute within a set-request is not yet supported
+		return nil, errors.New("table attribute decode for set-request not yet supported")
+
+	case byte(SetTable) | AR: // Set Table Request
+		// TODO: Only baseline supported at this time
+		return nil, errors.New("attribute encode for set-table-request not yet supported")
+	}
+}
+
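+// tableAttributeSerializeTo encodes a table attribute's value into the supplied
+// serialization buffer; the encoding used depends on the OMCI message type.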
+func (attr *AttributeDefinition) tableAttributeSerializeTo(value interface{}, b gopacket.SerializeBuffer, msgType byte,
+	bytesAvailable int) (int, error) {
+	// Serialization of a table depends on the type of message. A
+	// review of ITU-T G.988 shows that access to tables is either
+	// Read and/or Write, never Set-by-Create
+	switch msgType {
+	default:
+		return 0, fmt.Errorf("unsupported Message Type '%v' for table serialization", msgType)
+
+	case byte(Get) | AK: // Get Response
+		// Size
+		if bytesAvailable < 4 {
+			return 0, NewMessageTruncatedError(fmt.Sprintf("not enough space for attribute: %v", attr.Name))
+		}
+		if dwordSize, ok := value.(uint32); ok {
+			bytes, err := b.AppendBytes(4)
+			if err != nil {
+				return 0, err
+			}
+			binary.BigEndian.PutUint32(bytes, dwordSize)
+			return 4, nil
+		}
+		return 0, errors.New("unexpected type for table serialization")
+
+	case byte(GetNext) | AK: // Get Next Response
+		// Values are already in network byte order form
+		if data, ok := value.([]byte); ok {
+			if bytesAvailable < len(data) {
+				return 0, NewMessageTruncatedError(fmt.Sprintf("not enough space for attribute: %v", attr.Name))
+			}
+			bytes, err := b.AppendBytes(len(data))
+			if err != nil {
+				return 0, err
+			}
+			copy(bytes, data)
+			return len(data), nil
+		}
+		return 0, errors.New("unexpected type for table serialization")
+
+	case byte(Set) | AR: // Set Request
+		// TODO: Set-request encoding of a table attribute is not yet implemented;
+		// for now, execution continues with the generic fixed-size encoding below
+
+	case byte(SetTable) | AR: // Set Table Request
+		// TODO: Only baseline supported at this time
+		return 0, errors.New("attribute encode for set-table-request not yet supported")
+	}
+	size := attr.GetSize()
+	if bytesAvailable < size {
+		return 0, NewMessageTruncatedError(fmt.Sprintf("not enough space for attribute: %v", attr.Name))
+	}
+	bytes, err := b.AppendBytes(size)
+	if err != nil {
+		return 0, err
+	}
+	switch size {
+	default:
+		copy(bytes, value.([]byte))
+	case 1:
+		switch value.(type) {
+		case int:
+			bytes[0] = byte(value.(int))
+		default:
+			bytes[0] = value.(byte)
+		}
+	case 2:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint16(bytes, uint16(value.(int)))
+		default:
+			binary.BigEndian.PutUint16(bytes, value.(uint16))
+		}
+	case 4:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint32(bytes, uint32(value.(int)))
+		default:
+			binary.BigEndian.PutUint32(bytes, value.(uint32))
+		}
+	case 8:
+		switch value.(type) {
+		case int:
+			binary.BigEndian.PutUint64(bytes, uint64(value.(int)))
+		default:
+			binary.BigEndian.PutUint64(bytes, value.(uint64))
+		}
+	}
+	return size, nil
+}
+
+// GetAttributeDefinitionByName searches the attribute definition map for the
+// attribute with the specified name (case insensitive)
+func GetAttributeDefinitionByName(attrMap *AttributeDefinitionMap, name string) (*AttributeDefinition, OmciErrors) {
+	nameLower := strings.ToLower(name)
+	for _, attrVal := range *attrMap {
+		if nameLower == strings.ToLower(attrVal.GetName()) {
+			return attrVal, nil
+		}
+	}
+	return nil, NewAttributeFailureError(fmt.Sprintf("attribute '%s' not found", name))
+}
+
+// GetAttributeDefinitionMapKeys is a convenience function since we may need to
+// iterate a map in key index order. Since Go v1.0, the iteration order of maps
+// has been randomized.
+func GetAttributeDefinitionMapKeys(attrMap AttributeDefinitionMap) []uint {
+	var keys []uint
+	for k := range attrMap {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	return keys
+}
+
+// GetAttributeBitmap is a convenience function to scan a set of attributes
+// and return the bitmask that represents them.
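+// For example, attribute index 1 maps to the most significant bit (0x8000) and
+// index 2 to the next bit (0x4000), so a set naming just those two attributes
+// yields a bitmap of 0xC000.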
+func GetAttributeBitmap(attrMap AttributeDefinitionMap, attributes mapset.Set) (uint16, error) {
+	var mask uint16
+	for k, def := range attrMap {
+		if attributes.Contains(def.Name) {
+			mask |= 1 << uint16(16-k)
+			attributes.Remove(def.Name)
+		}
+	}
+	if attributes.Cardinality() > 0 {
+		return 0, fmt.Errorf("unsupported attributes: %v", attributes)
+	}
+	return mask, nil
+}
+
+///////////////////////////////////////////////////////////////////////
+// Packet definitions for attributes of various types/sizes
+
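+// ByteField returns an AttributeDefinition for a single-octet (8-bit) attribute.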
+func ByteField(name string, defVal uint8, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         1,
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
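+// Uint16Field returns an AttributeDefinition for a 2-octet (16-bit) attribute.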
+func Uint16Field(name string, defVal uint16, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         2,
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
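+// Uint32Field returns an AttributeDefinition for a 4-octet (32-bit) attribute.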
+func Uint32Field(name string, defVal uint32, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         4,
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
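+// Uint64Field returns an AttributeDefinition for an 8-octet (64-bit) attribute.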
+func Uint64Field(name string, defVal uint64, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         8,
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
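+// MultiByteField returns an AttributeDefinition for an attribute that is a
+// fixed-length sequence of 'size' octets (for example, a string or octet buffer).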
+func MultiByteField(name string, size uint, defVal []byte, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         int(size),
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
+// Notes on various OMCI ME Table attribute fields.  This comment will eventually be
+// removed once a good table solution is implemented.  These are not all the MEs with
+// table attributes, but probably the ones I care about supporting initially.
+//
+//   ME                     Notes
+//  --------------------------------------------------------------------------------------------
+//	Port-mapping package -> Combined Port table -> N * 25 sized rows (port (1) + ME(2) * 12)
+//  ONU Remote Debug     -> Reply table (N bytes)
+//  ONU3-G               -> Status snapshot record table (M x N bytes)
+//  MCAST Gem interworkTP-> IPv4 multicast address table (12*n) (two 2 byte fields, two 4 byte fields)
+//                          IPv6 multicast address table (24*n) (various sub-fields)
+//  L2 mcast gem TP      -> MCAST MAC addr filtering table (11 * n) (various sub-fields)
+//  MAC Bridge Port Filt -> MAC Filter table (8 * n) (3 fields, some are bits)      *** BITS ***
+//  MAC Bridge Port data -> Bridge Table (8*M) (various fields, some are bits)      *** BITS ***
+//  VLAN tagging filter  -> Rx Vlan tag op table (16 * n) Lots of bit fields        *** BITS ***
+//  MCAST operations profile
+//  MCAST Subscriber config info
+//  MCAST subscriber monitor
+//  OMCI                -> Two tables (N bytes and 2*N bytes)
+//  General purpose buffer  -> N bytes
+//  Enhanced security control (17 * N bytes), (16 * P Bytes) , (16 * Q bytes), and more...
+//
+// An early example of info to track
+//
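+// TableInfo holds a table attribute's value along with a Size, which the table
+// decode support above treats as the per-row size in octets.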
+type TableInfo struct {
+	Value interface{}
+	Size  int
+}
+
+func (t *TableInfo) String() string {
+	return fmt.Sprintf("TableInfo: Size: %d, Value(s): %v", t.Size, t.Value)
+}
+
+// TableField returns an AttributeDefinition for a table attribute
+func TableField(name string, tableInfo TableInfo, access mapset.Set,
+	avc bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     tableInfo.Value,
+		Size:         tableInfo.Size, // Size of each table row in octets
+		Access:       access,
+		Avc:          avc,
+		Counter:      false,
+		TableSupport: true,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
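+// UnknownField returns an AttributeDefinition for an attribute whose layout is
+// not known; a large sentinel value is used for its Size.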
+func UnknownField(name string, defVal uint64, access mapset.Set, avc bool,
+	counter bool, optional bool, deprecated bool, index uint) *AttributeDefinition {
+	return &AttributeDefinition{
+		Name:         name,
+		Index:        index,
+		DefValue:     defVal,
+		Size:         99999999,
+		Access:       access,
+		Avc:          avc,
+		Counter:      counter,
+		TableSupport: false,
+		Optional:     optional,
+		Deprecated:   deprecated,
+	}
+}
+
+///////////////////////////////////////////////////////////////////////
+// Attribute Name to Value    (Interface defined in generated subdirectory)
+
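+// AttributeValueMap maps an attribute name to its value.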
+type AttributeValueMap map[string]interface{}
diff --git a/vendor/github.com/cboling/omci/generated/attributeme.go b/vendor/github.com/cboling/omci/generated/attributeme.go
new file mode 100644
index 0000000..e405e66
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/attributeme.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const AttributeMeClassId ClassID = ClassID(289)
+
+var attributemeBME *ManagedEntityDefinition
+
+// AttributeMe (class ID #289)
+//	This ME describes a particular attribute type that is supported by the ONU. This ME is not
+//	included in an MIB upload.
+//
+//	Relationships
+//		One or more attribute entities are related to each ME entity. More than one ME entity can refer
+//		to a given attribute entity.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This number is
+//			the same as the one that appears in the attributes table in the ME. Only one instance of each
+//			unique attribute need be created. The ONU can assign attribute numbering as it pleases, out of
+//			the pool of 64K IDs; however, it is suggested that the numbering follow a rational scheme to aid
+//			human readability. (R) (mandatory) (2 bytes)
+//
+//		Name
+//			Name:	This attribute contains a 25 byte mnemonic tag for the attribute. Strings shorter than
+//			25 bytes are padded with null characters. (R) (mandatory) (25 bytes)
+//
+//		Size
+//			Size:	This attribute contains the size of the attribute, in bytes. The value 0 indicates that
+//			the attribute can have a variable/unknown size. (R) (mandatory) (2 bytes)
+//
+//		Access
+//			(R) (mandatory) (1 byte)
+//
+//		Format
+//			(R) (mandatory) (1 byte)
+//
+//		Lower Limit
+//			Lower limit:	This attribute provides the lowest value for the attribute. Valid for numeric types
+//			(pointer, signed integer, unsigned integer) only. For attributes smaller than 4 bytes, the
+//			desired numeric value is expressed in 4 byte representation (for example, the 2s complement
+//			1 byte integer 0xFE is expressed as 0xFFFF FFFE; the unsigned 1 byte integer 0xFE is expressed
+//			as 0x0000 00FE). (R) (mandatory) (4 bytes)
+//
+//		Upper Limit
+//			Upper limit:	This attribute provides the highest value for the attribute. It has the same
+//			validity and format as the lower limit attribute. (R) (mandatory) (4 bytes)
+//
+//		Bit Field
+//			Bit field:	This attribute is a mask of the supported bits in a bit field attribute, valid for
+//			bit field type only. A 1 in any position signifies that its code point is supported, while 0
+//			indicates that it is not supported. For bit fields smaller than 4 bytes, the attribute is
+//			aligned at the least significant end of the mask. (R) (mandatory) (4 bytes)
+//
+//		Code Points Table
+//			Code points table: This attribute lists the code points supported by an enumerated attribute.
+//			(R) (mandatory) (2 * Q bytes, where Q is the number of entries in the table.)
+//
+//		Support
+//			(R) (mandatory) (1 byte)
+//
+type AttributeMe struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	attributemeBME = &ManagedEntityDefinition{
+		Name:    "AttributeMe",
+		ClassID: 289,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("Name", 25, nil, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("Size", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: ByteField("Access", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: ByteField("Format", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("LowerLimit", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("UpperLimit", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("BitField", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: TableField("CodePointsTable", TableInfo{0, 2}, mapset.NewSetWith(Read), false, false, false, 8),
+			9: ByteField("Support", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+		},
+	}
+}
+
+// NewAttributeMe (class ID 289) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewAttributeMe(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(attributemeBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/authenticationsecuritymethod.go b/vendor/github.com/cboling/omci/generated/authenticationsecuritymethod.go
new file mode 100644
index 0000000..58b64dd
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/authenticationsecuritymethod.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const AuthenticationSecurityMethodClassId ClassID = ClassID(148)
+
+var authenticationsecuritymethodBME *ManagedEntityDefinition
+
+// AuthenticationSecurityMethod (class ID #148)
+//	The authentication security method defines the user ID and password configuration to establish a
+//	session between a client and a server. This object may be used in the role of the client or
+//	server. An instance of this ME is created by the OLT if authenticated communication is
+//	necessary.
+//
+//	Relationships
+//		One instance of this management entity may be associated with a network address ME. This ME may
+//		also be cited by other MEs that require authentication parameter management.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0xFFFF
+//			is reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Validation Scheme
+//			(R, W) (mandatory) (1 byte)
+//
+//		Username 1
+//			Username 1:	This string attribute is the user name. If the string is shorter than 25 bytes, it
+//			must be null terminated (Note). (R, W) (mandatory) (25 bytes)
+//
+//		Password
+//			Password:	This string attribute is the password. If the string is shorter than 25 bytes, it must
+//			be null terminated. (R, W) (mandatory) (25 bytes)
+//
+//		Realm
+//			Realm:	This string attribute specifies the realm used in digest authentication. If the string is
+//			shorter than 25 bytes, it must be null terminated. (R, W) (mandatory) (25 bytes)
+//
+//		Username 2
+//			NOTE – The total username is the concatenation of the username 1 and username 2 attributes if
+//			and only if: a) username 1 comprises 25 non-null characters; b) username 2 is supported by the
+//			ONU; and c) username 2 contains a leading non-null character string. Otherwise, the total
+//			username is simply the value of the username 1 attribute.
+//
+type AuthenticationSecurityMethod struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	authenticationsecuritymethodBME = &ManagedEntityDefinition{
+		Name:    "AuthenticationSecurityMethod",
+		ClassID: 148,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("ValidationScheme", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: MultiByteField("Username1", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: MultiByteField("Password", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: MultiByteField("Realm", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: MultiByteField("Username2", 25, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewAuthenticationSecurityMethod (class ID 148) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewAuthenticationSecurityMethod(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(authenticationsecuritymethodBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/bbftr-069managementserver.go b/vendor/github.com/cboling/omci/generated/bbftr-069managementserver.go
new file mode 100644
index 0000000..c3108af
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/bbftr-069managementserver.go
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const BbfTr069ManagementServerClassId ClassID = ClassID(340)
+
+var bbftr069managementserverBME *ManagedEntityDefinition
+
+// BbfTr069ManagementServer (class ID #340)
+//	If functions within the ONU are managed by [BBF TR-069], this ME allows OMCI configuration of
+//	the autoconfiguration server (ACS) URL and related authentication information for an ACS
+//	connection initiated by the ONU. [BBF TR-069] supports other means to discover its ACS, so not
+//	all BBF TR069-compatible ONUs necessarily support this ME. Furthermore, even if the ONU does
+//	support this ME, some operators may choose not to use it.
+//
+//	An ONU that supports OMCI configuration of ACS information automatically creates instances of
+//	this ME.
+//
+//	Relationships
+//		An instance of the BBF TR-069 management server ME exists for each instance of a BBF TR-069
+//		management domain within the ONU.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of a VEIP that links to the BBF TR-069
+//			management domain. (R) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. When the administrative state is locked, the functions of this ME are disabled. BBF TR-069
+//			connectivity to an ACS may be possible through means that do not depend on this ME. The default
+//			value of this attribute is locked. (R,W) (mandatory) (1 byte)
+//
+//		Acs Network Address
+//			ACS network address: This attribute points to an instance of a network address ME that contains
+//			URL and authentication information associated with the ACS URL. (R, W) (mandatory) (2 bytes)
+//
+//		Associated Tag
+//			Associated tag: This attribute is a TCI value for BBF TR-069 management traffic passing through
+//			the VEIP. A TCI, comprising user priority, CFI and VID, is represented by 2 bytes. The value
+//			0xFFFF specifies that BBF TR-069 management traffic passes through the VEIP with neither a VLAN
+//			nor a priority tag. (R, W) (mandatory) (2 bytes)
+//
+type BbfTr069ManagementServer struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	bbftr069managementserverBME = &ManagedEntityDefinition{
+		Name:    "BbfTr069ManagementServer",
+		ClassID: 340,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: Uint16Field("AcsNetworkAddress", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: Uint16Field("AssociatedTag", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewBbfTr069ManagementServer (class ID 340) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewBbfTr069ManagementServer(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(bbftr069managementserverBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/callcontrolperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/callcontrolperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..42527d2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/callcontrolperformancemonitoringhistorydata.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const CallControlPerformanceMonitoringHistoryDataClassId ClassID = ClassID(140)
+
+var callcontrolperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// CallControlPerformanceMonitoringHistoryData (class ID #140)
+//	This ME collects PM data related to the call control channel. Instances of this ME are created
+//	and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PPTP POTS UNI ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP POTS UNI. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Call Setup Failures
+//			Call setup failures: This attribute counts call set-up failures. (R) (mandatory) (4 bytes)
+//
+//		Call Setup Timer
+//			Call setup timer: This attribute is a high water-mark that records the longest duration of a
+//			single call set-up detected during this interval. Time is measured in milliseconds from the time
+//			an initial set-up was requested by the subscriber until the time at which a response was
+//			provided to the subscriber in the form of busy tone, audible ring tone, etc. (R) (mandatory)
+//			(4 bytes)
+//
+//		Call Terminate Failures
+//			Call terminate failures: This attribute counts the number of calls that were terminated with
+//			cause. (R) (mandatory) (4 bytes)
+//
+//		Analog Port Releases
+//			Analog port releases: This attribute counts the number of analogue port releases without
+//			dialling detected (abandoned calls). (R) (mandatory) (4 bytes)
+//
+//		Analog Port Off_Hook Timer
+//			Analog port off-hook timer: This attribute is a high water-mark that records the longest period
+//			of a single off-hook detected on the analogue port. Time is measured in milliseconds. (R)
+//			(mandatory) (4 bytes)
+//
+type CallControlPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	callcontrolperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "CallControlPerformanceMonitoringHistoryData",
+		ClassID: 140,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("CallSetupFailures", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("CallSetupTimer", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("CallTerminateFailures", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("AnalogPortReleases", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("AnalogPortOffHookTimer", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewCallControlPerformanceMonitoringHistoryData (class ID 140) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewCallControlPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(callcontrolperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/cardholder.go b/vendor/github.com/cboling/omci/generated/cardholder.go
new file mode 100644
index 0000000..0a3059b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/cardholder.go
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const CardholderClassId ClassID = ClassID(5)
+
+var cardholderBME *ManagedEntityDefinition
+
+// Cardholder (class ID #5)
+//	The cardholder represents the fixed equipment slot configuration of the ONU. Each cardholder can
+//	contain 0 or 1 circuit packs; the circuit pack models equipment information that can change over
+//	the lifetime of the ONU, e.g., through replacement.
+//
+//	One instance of this ME exists for each physical slot in an ONU that has pluggable circuit
+//	packs. One or more instances of this ME may also exist in an integrated ONU, to represent
+//	virtual slots. Instances of this ME are created automatically by the ONU, and the status
+//	attributes are populated according to data within the ONU itself.
+//
+//	Slot 0 is intended to be used only in an integrated ONU. If an integrated ONU is modelled with a
+//	universal slot 0, it is recommended that it does not contain additional (non-zero) virtual
+//	slots. A cardholder for virtual slot 0 is recommended.
+//
+//	There is potential for conflict in the semantics of the expected plug-in unit type, the expected
+//	port count and the expected equipment ID, both when the slot is not populated and when a new
+//	circuit pack is inserted. The expected plug-in unit type and the plug-in type mismatch alarm are
+//	mandatory, although plug-and-play/unknown (circuit pack type 255) may be used as a way to
+//	minimize their significance. It is recommended that an ONU deny the provisioning of inconsistent
+//	combinations of expected equipment attributes.
+//
+//	When a circuit pack is plugged into a cardholder or when a cardholder is pre-provisioned to
+//	expect a circuit pack of a given type, it may trigger the ONU to instantiate a number of MEs and
+//	update the values of others, depending on the circuit pack type. The ONU may also delete a
+//	variety of other MEs when a circuit pack is reprovisioned to not expect a circuit pack or to
+//	expect a circuit pack of a different type. These actions are described in the definitions of the
+//	various MEs.
+//
+//	Expected equipment ID and expected port count are alternate ways to trigger the same
+//	preprovisioning effects. These tools may be useful if an ONU is prepared to accept more than one
+//	circuit pack of a given type but with different port counts, or if a circuit pack is a hybrid
+//	that matches none of the types in Table 9.1.5-1, but whose identification (e.g., part number) is
+//	known.
+//
+//	Relationships
+//		An ONU may contain zero or more instances of the cardholder, each of which may contain an
+//		instance of the circuit pack ME. The slot ID, real or virtual, is a fundamental identification
+//		mechanism for MEs that bear some relationship to a physical location.
+//
+//	Attributes
+//		Managed Entity Id
+//			NOTE 1 – Some xDSL MEs use the two MSBs of the slot number for other purposes. An ONU that
+//			supports these services may have slot limitations or restrictions.
+//
+//		Actual Plug In Unit Type
+//			Actual plugin unit type: This attribute is equal to the type of the circuit pack in the
+//			cardholder, or 0 if the cardholder is empty. When the cardholder is populated, this attribute is
+//			the same as the type attribute of the corresponding circuit pack ME. Circuit pack types are
+//			defined in Table 9.1.5-1. (R) (mandatory) (1 byte)
+//
+//		Expected Plug_In Unit Type
+//			Expected plug-in unit type: This attribute provisions the type of circuit pack for the slot. For
+//			type coding, see Table 9.1.5-1. The value 0 means that the cardholder is not provisioned to
+//			contain a circuit pack. The value 255 means that the cardholder is configured for plug-and-play.
+//			Upon ME instantiation, the ONU sets this attribute to 0. For integrated interfaces, this
+//			attribute may be used to represent the type of interface. (R, W) (mandatory) (1 byte)
+//
+//		Expected Port Count
+//			Expected port count: This attribute permits the OLT to specify the number of ports it expects in
+//			a circuit pack. Prior to provisioning by the OLT, the ONU initializes this attribute to 0.
+//			(R, W) (optional) (1 byte)
+//
+//		Expected Equipment Id
+//			Expected equipment ID: This attribute provisions the specific type of expected circuit pack.
+//			This attribute applies only to ONUs that do not have integrated interfaces. In some
+//			environments, this may contain the expected CLEI code. Upon ME instantiation, the ONU sets this
+//			attribute to all spaces. (R, W) (optional) (20 bytes)
+//
+//		Actual Equipment Id
+//			Actual equipment ID: This attribute identifies the specific type of circuit pack, once it is
+//			installed. This attribute applies only to ONUs that do not have integrated interfaces. In some
+//			environments, this may include the CLEI code. When the slot is empty or the equipment ID is not
+//			known, this attribute should be set to all spaces. (R) (optional) (20 bytes)
+//
+//		Protection Profile Pointer
+//			Protection profile pointer: This attribute specifies an equipment protection profile that may be
+//			associated with the cardholder. Its value is the least significant byte of the ME ID of the
+//			equipment protection profile with which it is associated, or 0 if equipment protection is not
+//			used. (R) (optional) (1 byte)
+//
+//		Invoke Protection Switch
+//			When circuit packs that support a PON interface (IF) function are switched, the response should
+//			be returned on the same PON that received the command. However, the OLT should also be prepared
+//			to accept a response on the redundant PON. (R, W) (optional) (1 byte)
+//
+//		Alarm _ Reporting Control
+//			Alarm-reporting control (ARC): See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+type Cardholder struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	cardholderBME = &ManagedEntityDefinition{
+		Name:    "Cardholder",
+		ClassID: 5,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("ActualPlugInUnitType", 0, mapset.NewSetWith(Read), true, false, false, false, 1),
+			2: ByteField("ExpectedPlugInUnitType", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: ByteField("ExpectedPortCount", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4: MultiByteField("ExpectedEquipmentId", 20, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: MultiByteField("ActualEquipmentId", 20, nil, mapset.NewSetWith(Read), true, false, true, false, 5),
+			6: ByteField("ProtectionProfilePointer", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7: ByteField("InvokeProtectionSwitch", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8: ByteField("AlarmReportingControl", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 8),
+			9: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+		},
+	}
+}
+
+// NewCardholder (class ID 5) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewCardholder(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(cardholderBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/cesserviceprofile.go b/vendor/github.com/cboling/omci/generated/cesserviceprofile.go
new file mode 100644
index 0000000..50251f8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/cesserviceprofile.go
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const CesServiceProfileClassId ClassID = ClassID(21)
+
+var cesserviceprofileBME *ManagedEntityDefinition
+
+// CesServiceProfile (class ID #21)
+//	NOTE – In [ITU-T G.984.4], this ME is called a CES service profile-G.
+//
+//	An instance of this ME organizes data that describe the CES service functions of the ONU.
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of a GEM IW TP.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Ces Buffered Cdv Tolerance
+//			CES buffered CDV tolerance: This attribute represents the duration of user data that must be
+//			buffered by the CES IW entity to offset packet delay variation. It is expressed in 10 µs
+//			increments. 75 (750 μs) is suggested as a default value. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Channel Associated Signalling Cas
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+type CesServiceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	cesserviceprofileBME = &ManagedEntityDefinition{
+		Name:    "CesServiceProfile",
+		ClassID: 21,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("CesBufferedCdvTolerance", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("ChannelAssociatedSignallingCas", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+		},
+	}
+}
+
+// NewCesServiceProfile (class ID 21) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewCesServiceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(cesserviceprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/circuitpack.go b/vendor/github.com/cboling/omci/generated/circuitpack.go
new file mode 100644
index 0000000..e9b5b0b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/circuitpack.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const CircuitPackClassId ClassID = ClassID(6)
+
+var circuitpackBME *ManagedEntityDefinition
+
+// CircuitPack (class ID #6)
+//	This ME models a real or virtual circuit pack that is equipped in a real or virtual ONU slot.
+//	For ONUs with integrated interfaces, this ME may be used to distinguish available types of
+//	interfaces (the port-mapping package is another way).
+//
+//	For ONUs with integrated interfaces, the ONU automatically creates an instance of this ME for
+//	each instance of the virtual cardholder ME. The ONU also creates an instance of this ME when the
+//	OLT provisions the cardholder to expect a circuit pack, i.e., when the OLT sets the expected
+//	plug-in unit type or equipment ID of the cardholder to a circuit pack type, as defined in Table
+//	9.1.5-1. The ONU also creates an instance of this ME when a circuit pack is installed in a
+//	cardholder whose expected plug-in unit type is 255 = plugandplay, and whose equipment ID is not
+//	provisioned. Finally, when the cardholder is provisioned for plug-and-play, an instance of this
+//	ME can be created at the request of the OLT.
+//
+//	The ONU deletes an instance of this ME when the OLT de-provisions the circuit pack (i.e., when
+//	the OLT sets the expected plug-in unit type or equipment ID of the cardholder to 0 = no LIM).
+//	The ONU also deletes an instance of this ME on request of the OLT if the expected plug-in unit
+//	type attribute of the corresponding cardholder is equal to 255, plug-and-play, and the expected
+//	equipment ID is blank (a string of all spaces). ONUs with integrated interfaces do not delete
+//	circuit pack instances.
+//
+//	NOTE – Creation and deletion by the OLT is retained for backward compatibility.
+//
+//	Relationships
+//		An instance of this ME is contained by an instance of the cardholder ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Its value is the
+//			same as that of the cardholder ME containing this circuit pack instance. (R, setbycreate if
+//			applicable) (mandatory) (2 bytes)
+//
+//		Type
+//			Type:	This attribute identifies the circuit pack type. This attribute is a code as defined in
+//			Table 9.1.5-1. The value 255 means unknown or undefined, i.e., the inserted circuit pack is not
+//			recognized by the ONU or is not mapped to an entry in Table 9.1.5-1. In the latter case, the
+//			equipment ID attribute may contain inventory information. Upon autonomous ME instantiation, the
+//			ONU sets this attribute to 0 or to the type of the circuit pack that is physically present. (R,
+//			setbycreate if applicable) (mandatory) (1 byte)
+//
+//		Number Of Ports
+//			Number of ports: This attribute is the number of access ports on the circuit pack. If the port-
+//			mapping package is supported for this circuit pack, this attribute should be set to the total
+//			number of ports of all types. (R) (optional) (1 byte)
+//
+//		Serial Number
+//			Serial number: The serial number is expected to be unique for each circuit pack, at least within
+//			the scope of the given vendor. Note that the serial number may contain the vendor ID or version
+//			number. For integrated ONUs, this value is identical to the value of the serial number attribute
+//			of the ONU-G ME. Upon creation in the absence of a physical circuit pack, this attribute
+//			comprises all spaces. (R) (mandatory) (8 bytes)
+//
+//		Version
+//			Version:	This attribute is a string that identifies the version of the circuit pack as defined
+//			by the vendor. The value 0 indicates that version information is not available or applicable.
+//			For integrated ONUs, this value is identical to the value of the version attribute of the ONU-G
+//			ME. Upon creation in the absence of a physical circuit pack, this attribute comprises all
+//			spaces. (R) (mandatory) (14 bytes)
+//
+//		Vendor Id
+//			Vendor ID:	This attribute identifies the vendor of the circuit pack. For ONUs with integrated
+//			interfaces, this value is identical to the value of the vendor ID attribute of the ONU-G ME.
+//			Upon creation in the absence of a physical circuit pack, this attribute comprises all spaces.
+//			(R) (optional) (4 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the circuit pack is capable of performing
+//			its function. Valid values are enabled (0), disabled (1) and unknown (2). Pending completion of
+//			initialization and self-test on an installed circuit pack, the ONU sets this attribute to 2. (R)
+//			(optional) (1 byte)
+//
+//		Bridged Or Ip Ind
+//			(R, W) (optional, only applicable for circuit packs with Ethernet interfaces) (1 byte)
+//
+//		Equipment Id
+//			Equipment ID: This attribute may be used to identify the vendor's specific type of circuit pack.
+//			In some environments, this attribute may include the CLEI code. Upon ME instantiation, the ONU
+//			sets this attribute to all spaces or to the equipment ID of the circuit pack that is physically
+//			present. (R) (optional) (20 bytes)
+//
+//		Card Configuration
+//			Upon autonomous instantiation, this attribute is set to 0. (R, W, setbycreate if applicable)
+//			(mandatory for configurable circuit packs) (1 byte)
+//
+//		Total T_Cont Buffer Number
+//			Total T-CONT buffer number: This attribute reports the total number of T-CONT buffers associated
+//			with the circuit pack. Upon ME instantiation, the ONU sets this attribute to 0 or to the value
+//			supported by the physical circuit pack. (R) (mandatory for circuit packs that provide a traffic
+//			scheduler function) (1 byte)
+//
+//		Total Priority Queue Number
+//			Total priority queue number: This value reports the total number of priority queues associated
+//			with the circuit pack. Upon ME instantiation, the ONU sets the attribute to 0 or to the value
+//			supported by the physical circuit pack. (R) (mandatory for circuit packs that provide a traffic
+//			scheduler function) (1 byte)
+//
+//		Total Traffic Scheduler Number
+//			Total traffic scheduler number: This value reports the total number of traffic schedulers
+//			associated with the circuit pack. The ONU supports null function, strict priority scheduling and
+//			WRR from the priority control, and guarantee of minimum rate control points of view. If the
+//			circuit pack has no traffic scheduler, this attribute should be absent or have the value 0. Upon
+//			ME instantiation, the ONU sets the attribute to 0 or to the value supported by the physical
+//			circuit pack. (R) (mandatory for circuit packs that provide a traffic scheduler function)
+//			(1 byte)
+//
+//		Power Shed Override
+//			Power shed override: This attribute allows ports to be excluded from the power shed control
+//			defined in clause 9.1.7. It is a bit mask that takes port 1 as the MSB; a bit value of 1 marks
+//			the corresponding port to override the power shed timer. For hardware that cannot shed power per
+//			port, this attribute is a slot override rather than a port override, with any non-zero port
+//			value causing the entire circuit pack to override power shedding. (R, W) (optional) (4 bytes)
+//
+type CircuitPack struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	circuitpackBME = &ManagedEntityDefinition{
+		Name:    "CircuitPack",
+		ClassID: 6,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("Type", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 1),
+			2:  ByteField("NumberOfPorts", 0, mapset.NewSetWith(Read), false, false, true, false, 2),
+			3:  Uint64Field("SerialNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  MultiByteField("Version", 14, nil, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("VendorId", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 7),
+			8:  ByteField("BridgedOrIpInd", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  MultiByteField("EquipmentId", 20, nil, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: ByteField("CardConfiguration", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: ByteField("TotalTContBufferNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: ByteField("TotalPriorityQueueNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: ByteField("TotalTrafficSchedulerNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("PowerShedOverride", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+		},
+	}
+}
+
+// NewCircuitPack (class ID 6) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or is about to be sent on the wire.
+func NewCircuitPack(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(circuitpackBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/classidmap.go b/vendor/github.com/cboling/omci/generated/classidmap.go
new file mode 100644
index 0000000..77e6a0c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/classidmap.go
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "fmt"
+
+// ManagedEntityInfo provides ManagedEntity information
+type ManagedEntityInfo struct {
+	New func(params ...ParamData) (*ManagedEntity, error)
+}
+
+// ParamData can be passed to the 'New' function to dictate how the returned
+// Managed Entity is created. You should supply either zero or one ParamData
+// structure to 'New'.
+//
+// If No ParamData is passed, the returned Managed Entity can only be used for
+// providing validation of other structures. This is commonly done in a packet
+// encoder/decoder to assist in that process.
+//
+// If One ParamData is passed, the returned Managed Entity will be initialized
+// with the given values/attributes and then validated. This is commonly done
+// when you wish to create an ME for transmission, storage or removal from a
+// persistent database, or some other similar purpose.
+//
+type ParamData struct {
+	EntityID   uint16
+	Attributes AttributeValueMap
+}
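+
+// As an illustrative sketch only (the entity ID and attribute value below are
+// example data, not defaults), an ME intended for transmission might be
+// created with:
+//
+//	cardholder, omciErr := NewCardholder(ParamData{
+//		EntityID:   0x0101,
+//		Attributes: AttributeValueMap{"ExpectedPlugInUnitType": 255},
+//	})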
+
+// CreateME wraps a function that makes it a creator of a Managed Entity
+type CreateME func(params ...ParamData) (*ManagedEntity, OmciErrors)
+
+var classToManagedEntityMap map[ClassID]CreateME
+
+func init() {
+	// Create mapping of 16-bit managed entity class IDs to ME-type
+	classToManagedEntityMap = make(map[ClassID]CreateME, 160)
+
+	classToManagedEntityMap[2] = NewOnuData
+	classToManagedEntityMap[5] = NewCardholder
+	classToManagedEntityMap[6] = NewCircuitPack
+	classToManagedEntityMap[7] = NewSoftwareImage
+	classToManagedEntityMap[11] = NewPhysicalPathTerminationPointEthernetUni
+	classToManagedEntityMap[12] = NewPhysicalPathTerminationPointCesUni
+	classToManagedEntityMap[14] = NewInterworkingVccTerminationPoint
+	classToManagedEntityMap[16] = NewAal5Profile
+	classToManagedEntityMap[18] = NewAal5PerformanceMonitoringHistoryData
+	classToManagedEntityMap[21] = NewCesServiceProfile
+	classToManagedEntityMap[24] = NewEthernetPerformanceMonitoringHistoryData
+	classToManagedEntityMap[45] = NewMacBridgeServiceProfile
+	classToManagedEntityMap[46] = NewMacBridgeConfigurationData
+	classToManagedEntityMap[47] = NewMacBridgePortConfigurationData
+	classToManagedEntityMap[48] = NewMacBridgePortDesignationData
+	classToManagedEntityMap[49] = NewMacBridgePortFilterTableData
+	classToManagedEntityMap[50] = NewMacBridgePortBridgeTableData
+	classToManagedEntityMap[51] = NewMacBridgePerformanceMonitoringHistoryData
+	classToManagedEntityMap[52] = NewMacBridgePortPerformanceMonitoringHistoryData
+	classToManagedEntityMap[53] = NewPhysicalPathTerminationPointPotsUni
+	classToManagedEntityMap[58] = NewVoiceServiceProfile
+	classToManagedEntityMap[62] = NewVpPerformanceMonitoringHistoryData
+	classToManagedEntityMap[78] = NewVlanTaggingOperationConfigurationData
+	classToManagedEntityMap[79] = NewMacBridgePortFilterPreAssignTable
+	classToManagedEntityMap[82] = NewPhysicalPathTerminationPointVideoUni
+	classToManagedEntityMap[83] = NewPhysicalPathTerminationPointLctUni
+	classToManagedEntityMap[84] = NewVlanTaggingFilterData
+	classToManagedEntityMap[89] = NewEthernetPerformanceMonitoringHistoryData2
+	classToManagedEntityMap[90] = NewPhysicalPathTerminationPointVideoAni
+	classToManagedEntityMap[98] = NewPhysicalPathTerminationPointXdslUniPart1
+	classToManagedEntityMap[99] = NewPhysicalPathTerminationPointXdslUniPart2
+	classToManagedEntityMap[100] = NewXdslLineInventoryAndStatusDataPart1
+	classToManagedEntityMap[101] = NewXdslLineInventoryAndStatusDataPart2
+	classToManagedEntityMap[102] = NewXdslChannelDownstreamStatusData
+	classToManagedEntityMap[103] = NewXdslChannelUpstreamStatusData
+	classToManagedEntityMap[105] = NewXdslLineConfigurationProfilePart2
+	classToManagedEntityMap[106] = NewXdslLineConfigurationProfilePart3
+	classToManagedEntityMap[107] = NewXdslChannelConfigurationProfile
+	classToManagedEntityMap[108] = NewXdslSubcarrierMaskingDownstreamProfile
+	classToManagedEntityMap[109] = NewXdslSubcarrierMaskingUpstreamProfile
+	classToManagedEntityMap[112] = NewXdslXtuCPerformanceMonitoringHistoryData
+	classToManagedEntityMap[113] = NewXdslXtuRPerformanceMonitoringHistoryData
+	classToManagedEntityMap[114] = NewXdslXtuCChannelPerformanceMonitoringHistoryData
+	classToManagedEntityMap[115] = NewXdslXtuRChannelPerformanceMonitoringHistoryData
+	classToManagedEntityMap[116] = NewTcAdaptorPerformanceMonitoringHistoryDataXdsl
+	classToManagedEntityMap[130] = NewIeee8021PMapperServiceProfile
+	classToManagedEntityMap[131] = NewOltG
+	classToManagedEntityMap[133] = NewOnuPowerShedding
+	classToManagedEntityMap[134] = NewIpHostConfigData
+	classToManagedEntityMap[135] = NewIpHostPerformanceMonitoringHistoryData
+	classToManagedEntityMap[136] = NewTcpUdpConfigData
+	classToManagedEntityMap[137] = NewNetworkAddress
+	classToManagedEntityMap[138] = NewVoipConfigData
+	classToManagedEntityMap[139] = NewVoipVoiceCtp
+	classToManagedEntityMap[140] = NewCallControlPerformanceMonitoringHistoryData
+	classToManagedEntityMap[141] = NewVoipLineStatus
+	classToManagedEntityMap[142] = NewVoipMediaProfile
+	classToManagedEntityMap[143] = NewRtpProfileData
+	classToManagedEntityMap[144] = NewRtpPerformanceMonitoringHistoryData
+	classToManagedEntityMap[145] = NewNetworkDialPlanTable
+	classToManagedEntityMap[146] = NewVoipApplicationServiceProfile
+	classToManagedEntityMap[147] = NewVoipFeatureAccessCodes
+	classToManagedEntityMap[148] = NewAuthenticationSecurityMethod
+	classToManagedEntityMap[150] = NewSipAgentConfigData
+	classToManagedEntityMap[151] = NewSipAgentPerformanceMonitoringHistoryData
+	classToManagedEntityMap[152] = NewSipCallInitiationPerformanceMonitoringHistoryData
+	classToManagedEntityMap[153] = NewSipUserData
+	classToManagedEntityMap[155] = NewMgcConfigData
+	classToManagedEntityMap[156] = NewMgcPerformanceMonitoringHistoryData
+	classToManagedEntityMap[160] = NewEquipmentExtensionPackage
+	classToManagedEntityMap[162] = NewPhysicalPathTerminationPointMocaUni
+	classToManagedEntityMap[163] = NewMocaEthernetPerformanceMonitoringHistoryData
+	classToManagedEntityMap[171] = NewExtendedVlanTaggingOperationConfigurationData
+	classToManagedEntityMap[256] = NewOnuG
+	classToManagedEntityMap[257] = NewOnu2G
+	classToManagedEntityMap[262] = NewTCont
+	classToManagedEntityMap[263] = NewAniG
+	classToManagedEntityMap[264] = NewUniG
+	classToManagedEntityMap[266] = NewGemInterworkingTerminationPoint
+	classToManagedEntityMap[268] = NewGemPortNetworkCtp
+	classToManagedEntityMap[269] = NewVpNetworkCtp
+	classToManagedEntityMap[272] = NewGalEthernetProfile
+	classToManagedEntityMap[273] = NewThresholdData1
+	classToManagedEntityMap[274] = NewThresholdData2
+	classToManagedEntityMap[276] = NewGalEthernetPerformanceMonitoringHistoryData
+	classToManagedEntityMap[277] = NewPriorityQueue
+	classToManagedEntityMap[278] = NewTrafficScheduler
+	classToManagedEntityMap[280] = NewTrafficDescriptor
+	classToManagedEntityMap[281] = NewMulticastGemInterworkingTerminationPoint
+	classToManagedEntityMap[282] = NewPseudowireTerminationPoint
+	classToManagedEntityMap[283] = NewRtpPseudowireParameters
+	classToManagedEntityMap[284] = NewPseudowireMaintenanceProfile
+	classToManagedEntityMap[285] = NewPseudowirePerformanceMonitoringHistoryData
+	classToManagedEntityMap[286] = NewEthernetFlowTerminationPoint
+	classToManagedEntityMap[287] = NewOmci
+	classToManagedEntityMap[288] = NewManagedEntityMe
+	classToManagedEntityMap[289] = NewAttributeMe
+	classToManagedEntityMap[290] = NewDot1XPortExtensionPackage
+	classToManagedEntityMap[291] = NewDot1XConfigurationProfile
+	classToManagedEntityMap[292] = NewDot1XPerformanceMonitoringHistoryData
+	classToManagedEntityMap[293] = NewRadiusPerformanceMonitoringHistoryData
+	classToManagedEntityMap[296] = NewEthernetPerformanceMonitoringHistoryData3
+	classToManagedEntityMap[298] = NewDot1RateLimiter
+	classToManagedEntityMap[299] = NewDot1AgMaintenanceDomain
+	classToManagedEntityMap[300] = NewDot1AgMaintenanceAssociation
+	classToManagedEntityMap[301] = NewDot1AgDefaultMdLevel
+	classToManagedEntityMap[302] = NewDot1AgMep
+	classToManagedEntityMap[305] = NewDot1AgCfmStack
+	classToManagedEntityMap[310] = NewMulticastSubscriberConfigInfo
+	classToManagedEntityMap[311] = NewMulticastSubscriberMonitor
+	classToManagedEntityMap[313] = NewReAniG
+	classToManagedEntityMap[316] = NewReDownstreamAmplifier
+	classToManagedEntityMap[321] = NewEthernetFramePerformanceMonitoringHistoryDataDownstream
+	classToManagedEntityMap[322] = NewEthernetFramePerformanceMonitoringHistoryDataUpstream
+	classToManagedEntityMap[325] = NewXdslLineInventoryAndStatusDataPart5
+	classToManagedEntityMap[328] = NewReCommonAmplifierParameters
+	classToManagedEntityMap[329] = NewVirtualEthernetInterfacePoint
+	classToManagedEntityMap[332] = NewEnhancedSecurityControl
+	classToManagedEntityMap[333] = NewMplsPseudowireTerminationPoint
+	classToManagedEntityMap[334] = NewEthernetFrameExtendedPm
+	classToManagedEntityMap[335] = NewSnmpConfigurationData
+	classToManagedEntityMap[336] = NewOnuDynamicPowerManagementControl
+	classToManagedEntityMap[338] = NewPwAtmPerformanceMonitoringHistoryData
+	classToManagedEntityMap[339] = NewPwEthernetConfigurationData
+	classToManagedEntityMap[340] = NewBbfTr069ManagementServer
+	classToManagedEntityMap[341] = NewGemPortNetworkCtpPerformanceMonitoringHistoryData
+	classToManagedEntityMap[342] = NewTcpUdpPerformanceMonitoringHistoryData
+	classToManagedEntityMap[343] = NewEnergyConsumptionPerformanceMonitoringHistoryData
+	classToManagedEntityMap[344] = NewXgPonTcPerformanceMonitoringHistoryData
+	classToManagedEntityMap[345] = NewXgPonDownstreamManagementPerformanceMonitoringHistoryData
+	classToManagedEntityMap[346] = NewXgPonUpstreamManagementPerformanceMonitoringHistoryData
+	classToManagedEntityMap[348] = NewMacBridgePortIcmpv6ProcessPreAssignTable
+	classToManagedEntityMap[400] = NewEthernetPseudowireParameters
+	classToManagedEntityMap[408] = NewXdslXtuCPerformanceMonitoringHistoryDataPart2
+	classToManagedEntityMap[410] = NewVdsl2LineConfigurationExtensions3
+	classToManagedEntityMap[412] = NewXdslChannelConfigurationProfilePart2
+	classToManagedEntityMap[414] = NewXdslLineInventoryAndStatusDataPart8
+	classToManagedEntityMap[420] = NewEfmBondingLink
+	classToManagedEntityMap[421] = NewEfmBondingGroupPerformanceMonitoringHistoryData
+	classToManagedEntityMap[422] = NewEfmBondingGroupPerformanceMonitoringHistoryDataPart2
+	classToManagedEntityMap[423] = NewEfmBondingLinkPerformanceMonitoringHistoryData
+	classToManagedEntityMap[424] = NewEfmBondingPortPerformanceMonitoringHistoryData
+	classToManagedEntityMap[425] = NewEfmBondingPortPerformanceMonitoringHistoryDataPart2
+	classToManagedEntityMap[426] = NewEthernetFrameExtendedPm64Bit
+	classToManagedEntityMap[432] = NewFastChannelConfigurationProfile
+	classToManagedEntityMap[433] = NewFastDataPathConfigurationProfile
+	classToManagedEntityMap[434] = NewFastVectoringLineConfigurationExtensions
+	classToManagedEntityMap[436] = NewFastLineInventoryAndStatusDataPart2
+	classToManagedEntityMap[437] = NewFastXtuCPerformanceMonitoringHistoryData
+	classToManagedEntityMap[438] = NewFastXtuRPerformanceMonitoringHistoryData
+	classToManagedEntityMap[443] = NewTwdmChannelManagedEntity
+	classToManagedEntityMap[444] = NewTwdmChannelPhyLodsPerformanceMonitoringHistoryData
+	classToManagedEntityMap[445] = NewTwdmChannelXgemPerformanceMonitoringHistoryData
+	classToManagedEntityMap[446] = NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart1
+	classToManagedEntityMap[447] = NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart2
+	classToManagedEntityMap[448] = NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart3
+	classToManagedEntityMap[449] = NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart1
+	classToManagedEntityMap[450] = NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart2
+	classToManagedEntityMap[451] = NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart3
+	classToManagedEntityMap[452] = NewTwdmChannelOmciPerformanceMonitoringHistoryData
+}
+
+// LoadManagedEntityDefinition returns a Managed Entity for the given class
+// ID, or an unknown-entity error if the class ID is not supported.
+func LoadManagedEntityDefinition(classID ClassID, params ...ParamData) (*ManagedEntity, OmciErrors) {
+	newFunc, ok := classToManagedEntityMap[classID]
+	if ok {
+		return newFunc(params...)
+	}
+	return nil, NewUnknownEntityError(fmt.Sprintf("managed entity %d (%#x) definition not found",
+		uint16(classID), uint16(classID)))
+}
+
+// GetSupportedClassIDs returns the class IDs of all Managed Entities known
+// to this generated package.
+func GetSupportedClassIDs() []ClassID {
+	supported := make([]ClassID, 0, len(classToManagedEntityMap))
+	for k := range classToManagedEntityMap {
+		supported = append(supported, k)
+	}
+	return supported
+}
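+
+// Example (illustrative sketch, not generated code): resolving a class ID
+// received from the wire into its ME definition and listing supported classes.
+// How the returned OmciErrors value is inspected depends on the library's
+// error conventions.
+//
+//	me, omciErr := LoadManagedEntityDefinition(ClassID(263)) // 263 = ANI-G
+//	// check omciErr before using me
+//
+//	for _, cid := range GetSupportedClassIDs() {
+//		fmt.Println(cid)
+//	}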
diff --git a/vendor/github.com/cboling/omci/generated/dot1agcfmstack.go b/vendor/github.com/cboling/omci/generated/dot1agcfmstack.go
new file mode 100644
index 0000000..1744eea
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1agcfmstack.go
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1AgCfmStackClassId ClassID = ClassID(305)
+
+var dot1agcfmstackBME *ManagedEntityDefinition
+
+// Dot1AgCfmStack (class ID #305)
+//	This ME reports the maintenance status of a bridge port at any given time. An ONU that supports
+//	[IEEE 802.1ag] functionality automatically creates an instance of the dot1ag CFM stack ME for
+//	each MAC bridge or IEEE 802.1p mapper, depending on its provisioning model.
+//
+//	The dot1ag CFM stack also lists any VLANs and bridge ports against which configuration errors
+//	are currently identified. The ONU should reject operations that create configuration errors.
+//	However, these errors can arise because of operations on other MEs that are not necessarily
+//	possible to detect during CFM configuration.
+//
+//	Relationships
+//		An ONU that supports [IEEE 802.1ag] creates one instance of this ME for each MAC bridge or IEEE
+//		802.1p mapper, depending on its provisioning model. It should not create an instance for an
+//		IEEE 802.1p mapper that is associated with a MAC bridge.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies an instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge service profile ME
+//			or an IEEE 802.1p mapper ME. It is expected that an ONU will implement CFM on bridges or on
+//			IEEE 802.1p mappers, but not both. For precision, the reference is disambiguated by the value of
+//			the layer 2 type pointer attribute. (R) (mandatory) (2 bytes)
+//
+//		Layer 2 Type
+//			Layer 2 type:	This attribute specifies whether the dot1ag CFM stack is associated with a MAC
+//			bridge service profile (value 0) or an IEEE 802.1p mapper (value 1). (R) (mandatory) (1 byte)
+//
+//		Mp Status Table
+//			(R) (mandatory) (18N bytes)
+//
+//		Configuration Error List Table
+//			(R) (mandatory) (5N bytes)
+//
+type Dot1AgCfmStack struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1agcfmstackBME = &ManagedEntityDefinition{
+		Name:    "Dot1AgCfmStack",
+		ClassID: 305,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("Layer2Type", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: TableField("MpStatusTable", TableInfo{nil, 18}, mapset.NewSetWith(Read), false, false, false, 2),
+			3: TableField("ConfigurationErrorListTable", TableInfo{nil, 5}, mapset.NewSetWith(Read), true, false, false, 3),
+		},
+	}
+}
+
+// NewDot1AgCfmStack (class ID 305) creates the basic Managed Entity definition
+// that is used to validate an ME of this type that is received from, or is
+// about to be sent on, the wire.
+func NewDot1AgCfmStack(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1agcfmstackBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1agdefaultmdlevel.go b/vendor/github.com/cboling/omci/generated/dot1agdefaultmdlevel.go
new file mode 100644
index 0000000..8668539
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1agdefaultmdlevel.go
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1AgDefaultMdLevelClassId ClassID = ClassID(301)
+
+var dot1agdefaultmdlevelBME *ManagedEntityDefinition
+
+// Dot1AgDefaultMdLevel (class ID #301)
+//	The collection of functionality called a maintenance half-function (MHF) is not explicitly
+//	modelled as a ME by either [IEEE 802.1ag] or the OMCI. The ONU automatically creates MHFs
+//	according to parameters specified in a dot1ag MD or a dot1ag MA ME; the dot1ag default MD level
+//	ME catches the corner cases not covered by other MEs, specifically VLANs not included by any
+//	defined MA.
+//
+//	The dot1ag default MD level comprises a configurable table, each entry of which specifies
+//	default MHF functionality for some set of VLANs. Once a set of VLANs is defined, operations to
+//	different table entries or to dot1ag MAs that conflict with the set membership should be denied.
+//	In addition, catch-all attributes are defined to specify MHF functionality when there is no
+//	match to either a table entry or an MA.
+//
+//	Relationships
+//		An ONU that supports [IEEE 802.1ag] automatically creates one instance of this ME for each MAC
+//		bridge or IEEE 802.1p mapper, depending on the ONU's provisioning model. It should not create an
+//		instance for an IEEE 802.1p mapper that is associated with a MAC bridge.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies an instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge service profile ME
+//			or an IEEE 802.1p mapper ME. It is expected that an ONU will implement CFM on bridges or on
+//			IEEE 802.1p mappers, but not both, depending on its provisioning model. For precision, the
+//			reference is disambiguated by the value of the layer 2 type pointer attribute. (R) (mandatory)
+//			(2 bytes)
+//
+//		Layer 2 Type
+//			Layer 2 type: This attribute specifies whether the dot1ag default MD level ME is associated with
+//			a MAC bridge service profile (value 0) or an IEEE 802.1p mapper (value 1). (R) (mandatory)
+//			(1 byte)
+//
+//		Catchall Level
+//			Catchall level: This attribute ranges from 0..7 and specifies the MD level of MHFs created when
+//			no specific match is found. (R, W) (mandatory) (1 byte)
+//
+//		Catchall Mhf Creation
+//			(R, W) (mandatory) (1 byte)
+//
+//		Catchall Sender Id Permission
+//			Catchall sender ID permission: This attribute determines the contents of the sender ID TLV
+//			included in CFM messages transmitted by MPs when no more specific match is found. This attribute
+//			is identical to that defined in the description of the dot1ag MD ME (i.e., excluding code point
+//			5, defer). (R, W) (mandatory) (1 byte)
+//
+//		Default Md Level Table
+//			(R, W) (mandatory) (29 bytes * N entries)
+//
+type Dot1AgDefaultMdLevel struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1agdefaultmdlevelBME = &ManagedEntityDefinition{
+		Name:    "Dot1AgDefaultMdLevel",
+		ClassID: 301,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("Layer2Type", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("CatchallLevel", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: ByteField("CatchallMhfCreation", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: ByteField("CatchallSenderIdPermission", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: TableField("DefaultMdLevelTable", TableInfo{0, 1}, mapset.NewSetWith(Read, Write), false, false, false, 5),
+		},
+	}
+}
+
+// NewDot1AgDefaultMdLevel (class ID 301) creates the basic Managed Entity
+// definition that is used to validate an ME of this type that is received
+// from, or is about to be sent on, the wire.
+func NewDot1AgDefaultMdLevel(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1agdefaultmdlevelBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1agmaintenanceassociation.go b/vendor/github.com/cboling/omci/generated/dot1agmaintenanceassociation.go
new file mode 100644
index 0000000..3c09b07
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1agmaintenanceassociation.go
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1AgMaintenanceAssociationClassId ClassID = ClassID(300)
+
+var dot1agmaintenanceassociationBME *ManagedEntityDefinition
+
+// Dot1AgMaintenanceAssociation (class ID #300)
+//	This ME models an [IEEE 802.1ag] service defined on a bridge port. An MA is a set of endpoints
+//	on opposite sides of a network, all existing at a defined maintenance level. One of the
+//	endpoints resides on the local ONU; the others are understood to be configured in a consistent
+//	way on external equipment. [ITUT Y.1731] refers to the MA as a maintenance entity group (MEG).
+//
+//	An MA is created and deleted by the OLT.
+//
+//	Relationships
+//		Any number of MAs may be associated with a given MD, or may stand on their own without an MD.
+//		One or more MAs may be associated with a MAC bridge or an IEEE 802.1p mapper. An MA exists at
+//		one of eight possible maintenance levels.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies an instance of this ME. The values 0 and
+//			0xFFFF are reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Md Pointer
+//			MD pointer:	This pointer specifies the dot1ag maintenance domain with which this MA is
+//			associated. A null pointer specifies that the MA is not associated with an MD. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Short Ma Name Format
+//			Short MA name format: This attribute specifies one of several possible formats for the short MA
+//			name attribute. Value 1, the primary VLAN ID, is recommended to be the default. (R, W,
+//			setbycreate) (mandatory) (1 byte)
+//
+//		Short Ma Name 1, Short Ma Name 2
+//			Short MA name 1, Short MA name 2: These two attributes may be regarded as an octet string whose
+//			value is the left-justified MA name. Because the MA name may or may not be a printable character
+//			string, an octet string is the appropriate representation. If the short MA name format specifies
+//			a character string, the string is null-terminated; otherwise, its length is determined by the
+//			short MA name format. Note that binary comparisons of the short MA name are made in other CFM
+//			state machines, so blanks, alphabetic case, etc., are significant. Also, note that the MD name
+//			and the MA short name must be packed (with additional bytes) into 48 byte CFM message headers.
+//			(R, W) (mandatory) (25 bytes * 2 attributes)
+//
+//		Continuity Check Message Ccm Interval
+//			Short intervals should be used judiciously, as they can interfere with the network's ability to
+//			handle subscriber traffic. The recommended value is 1 s. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Associated Vlans
+//			Associated VLANs: This attribute is a list of up to 12 VLAN IDs with which this MA is
+//			associated. Once a set of VLANs is defined, the ONU should deny operations to other dot1ag MAs
+//			or dot1ag default MD level entries that conflict with the set membership. The all-zeros value
+//			indicates that this MA is not associated with any VLANs. Assuming that the attribute is not 0,
+//			the first entry is understood to be the primary VLAN. Except for forwarded linktrace messages
+//			(LTMs), CFM messages emitted by MPs in this MA are tagged with the primary VLAN ID. (R, W)
+//			(mandatory) (2 bytes/entry * 12 entries = 24 bytes)
+//
+//		Mhf Creation
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Sender Id Permission
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type Dot1AgMaintenanceAssociation struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1agmaintenanceassociationBME = &ManagedEntityDefinition{
+		Name:    "Dot1AgMaintenanceAssociation",
+		ClassID: 300,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("MdPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("ShortMaNameFormat", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: MultiByteField("ShortMaName1,ShortMaName2", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: ByteField("ContinuityCheckMessageCcmInterval", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: MultiByteField("AssociatedVlans", 24, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6: ByteField("MhfCreation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: ByteField("SenderIdPermission", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewDot1AgMaintenanceAssociation (class ID 300) creates the basic Managed
+// Entity definition that is used to validate an ME of this type that is
+// received from, or is about to be sent on, the wire.
+func NewDot1AgMaintenanceAssociation(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1agmaintenanceassociationBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1agmaintenancedomain.go b/vendor/github.com/cboling/omci/generated/dot1agmaintenancedomain.go
new file mode 100644
index 0000000..b1ae103
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1agmaintenancedomain.go
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1AgMaintenanceDomainClassId ClassID = ClassID(299)
+
+var dot1agmaintenancedomainBME *ManagedEntityDefinition
+
+// Dot1AgMaintenanceDomain (class ID #299)
+//	In [IEEE 802.1ag], a maintenance domain (MD) is a context within which configuration fault
+//	management (CFM) connectivity verification can occur. Individual services (maintenance
+//	associations, MAs) exist within an MD. An MD is created and deleted by the OLT. The MD ME is
+//	specified by [IEEE 802.1ag] in such a way that the same provisioning can be used for all
+//	associated systems in a network; the OMCI definition accordingly avoids ONU-specific information
+//	such as pointers.
+//
+//	Relationships
+//		Several MDs may be associated with a given bridge, at various MD levels, and a given MD may be
+//		associated with any number of bridges.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies an instance of this ME. The values 0 and
+//			0xFFFF are reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Md Level
+//			MD level:	This attribute ranges from 0..7 and specifies the maintenance level of this MD. Higher
+//			numbers have wider geographic scope. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Md Name Format
+//			MD name format: This attribute specifies one of several possible formats for the MD name
+//			attribute. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Md Name 1 Md Name 2
+//			MD name 1, MD name 2: These two attributes may be regarded as a 50 byte octet string whose value
+//			is the left-justified maintenance domain name. The MD name may or may not be a printable
+//			character string, so an octet string is the appropriate representation. If the MD name format
+//			specifies a DNS-like name or a character string, the string is null-terminated; otherwise, its
+//			length is determined by the MD name format. If the MD has no name (MD name format = 0), this
+//			attribute is undefined. Note that binary comparisons of the MD name are made in other CFM state
+//			machines, so blanks, alphabetic case, etc., are significant. Also, note that the MD name and the
+//			MA name must be packed (with additional bytes) into 48 byte CFM message headers. (R, W)
+//			(mandatory if MD name format is not 1) (25 bytes * 2 attributes)
+//
+//		Maintenance Domain Intermediate Point Half Function Mhf Creation
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Sender Id Permission
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type Dot1AgMaintenanceDomain struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1agmaintenancedomainBME = &ManagedEntityDefinition{
+		Name:    "Dot1AgMaintenanceDomain",
+		ClassID: 299,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("MdLevel", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("MdNameFormat", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: MultiByteField("MdName1MdName2", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: ByteField("MaintenanceDomainIntermediatePointHalfFunctionMhfCreation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: ByteField("SenderIdPermission", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewDot1AgMaintenanceDomain (class ID 299) creates the basic Managed Entity
+// definition that is used to validate an ME of this type that is received
+// from, or is about to be sent on, the wire.
+func NewDot1AgMaintenanceDomain(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1agmaintenancedomainBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1agmep.go b/vendor/github.com/cboling/omci/generated/dot1agmep.go
new file mode 100644
index 0000000..4b4924c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1agmep.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1AgMepClassId ClassID = ClassID(302)
+
+var dot1agmepBME *ManagedEntityDefinition
+
+// Dot1AgMep (class ID #302)
+//	This ME models an MEP as defined primarily in [IEEE 802.1ag] and secondarily in [ITUT Y.1731].
+//	It is created and deleted by the OLT. An MEP exists at one of eight possible maintenance levels,
+//	and resides at the boundary of an MD. It inherits a name, and optionally a set of associated
+//	VLANs, from its associated MA.
+//
+//	Relationships
+//		One or more MEPs may be associated with a MAC bridge port or an IEEE 802.1p mapper in the
+//		absence of a MAC bridge. An MEP is also associated with zero or more VLANs and an MA.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Layer 2 Entity Pointer
+//			Layer 2 entity pointer: Depending on the value of the layer 2 type attribute, this pointer
+//			specifies the MAC bridge port configuration data ME or the IEEE 802.1p mapper service profile ME
+//			with which this MEP is associated. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Layer 2 Type
+//			Layer 2 type:	This attribute specifies whether the MA is associated with a MAC bridge port
+//			(value 0) or an IEEE 802.1p mapper (value 1). (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Ma Pointer
+//			MA pointer:	This pointer specifies the maintenance association with which this MEP is
+//			associated. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Mep Id
+//			MEP ID:	This attribute specifies the MEP's own identity in the MA. For a given MA, the MEP ID
+//			must be unique throughout the network defined by the MD. The MEP ID is defined in the range
+//			1..8191. The value 0 indicates that no MEP ID is (yet) configured. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Mep Control
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Primary Vlan
+//			Primary VLAN: This attribute is a 12 bit VLAN ID. The value 0 indicates that the MEP inherits
+//			its primary VLAN from its parent MA. CFM messages, except forwarded LTMs, are tagged with the
+//			primary VLAN ID. If explicitly specified, the value of this attribute must be one of the VLANs
+//			associated with the parent MA. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Ccm And Ltm Priority
+//			CCM and LTM priority: Ranging from 0..7, this attribute permits CCM and LTM frames to be
+//			explicitly prioritized, which may be needed if flows are separated, e.g., by 802.1p priority.
+//			The priority specified in this attribute is also used in linktrace reply (LTR) frames originated
+//			by this MEP. The value 0xFF selects the IEEE 802.1ag default, whereby CCM and LTM frames are
+//			transmitted with the highest Ethernet priority available. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Egress Identifier
+//			Egress identifier: This attribute comprises 8 bytes to be included in LTMs. They allow received
+//			LTRs to be directed to the correct originator. The attribute includes the originator MAC address
+//			and a locally defined identifier. If this field is 0, the ONU uses the MEP's MAC address, with 0
+//			as the locally defined identifier. (R, W, setbycreate) (mandatory) (8 bytes)
+//
+//		Peer Mep Ids
+//			Peer MEP IDs: This attribute lists the expected peer MEPs for CCMs, 2 bytes per MEP ID. [IEEE
+//			802.1ag] allows for multipoint networks, and therefore a list of peer MEPs. This attribute
+//			allows for up to 12 peers for a given MEP, though GPON applications are expected to need only a
+//			single peer. Missing or unexpected messages trigger alarm declaration after a soak interval.
+//			Unused peer MEP slots should be set to 0. (R, W) (mandatory) (24 bytes)
+//
+//		Eth Ais Control
+//			(R, W, setbycreate) (mandatory if ETH AIS is enabled) (1 byte)
+//
+//		Fault Alarm Threshold
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Alarm Declaration Soak Time
+//			Alarm declaration soak time: This attribute defines the defect soak time that must elapse before
+//			the MEP declares an alarm. It is expressed in 10 ms units with a range of 250 to 1000, i.e.,
+//			2.5 s to 10 s. The default is recommended to be 2.5 seconds. (R, W) (mandatory) (2 bytes)
+//
+//		Alarm Clear Soak Time
+//			Alarm clear soak time: This attribute defines the defect-free soak time that must elapse before
+//			the MEP clears an alarm. It is expressed in intervals of 10 ms with a range of 250 to 1000,
+//			i.e., 2.5 s to 10 s. The default is recommended to be 10 s. (R, W) (mandatory) (2 bytes)
+//
+type Dot1AgMep struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1agmepBME = &ManagedEntityDefinition{
+		Name:    "Dot1AgMep",
+		ClassID: 302,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("Layer2EntityPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  ByteField("Layer2Type", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("MaPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("MepId", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  ByteField("MepControl", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint16Field("PrimaryVlan", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("CcmAndLtmPriority", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  Uint64Field("EgressIdentifier", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: MultiByteField("PeerMepIds", 24, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: ByteField("EthAisControl", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 11),
+			12: ByteField("FaultAlarmThreshold", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 12),
+			13: Uint16Field("AlarmDeclarationSoakTime", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 13),
+			14: Uint16Field("AlarmClearSoakTime", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 14),
+		},
+	}
+}
+
+// NewDot1AgMep (class ID 302) creates the basic Managed Entity definition
+// that is used to validate an ME of this type that is received from, or is
+// about to be sent on, the wire.
+func NewDot1AgMep(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1agmepBME, params...)
+}
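+
+// Example (illustrative sketch, not part of the generated code): creating a
+// dot1ag MEP instance with a few of the attributes defined above. The attribute
+// values shown are placeholders; consult [IEEE 802.1ag] and the ME description
+// for valid settings.
+//
+//	mep, omciErr := NewDot1AgMep(ParamData{
+//		EntityID: 1,
+//		Attributes: AttributeValueMap{
+//			"MaPointer":           uint16(1),
+//			"MepId":               uint16(10),
+//			"AdministrativeState": byte(0), // 0 = unlocked
+//		},
+//	})
+//	// check omciErr before transmitting or storing mep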
diff --git a/vendor/github.com/cboling/omci/generated/dot1ratelimiter.go b/vendor/github.com/cboling/omci/generated/dot1ratelimiter.go
new file mode 100644
index 0000000..0ddf7de
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1ratelimiter.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1RateLimiterClassId ClassID = ClassID(298)
+
+var dot1ratelimiterBME *ManagedEntityDefinition
+
+// Dot1RateLimiter (class ID #298)
+//	This ME allows rate limits to be defined for various types of upstream traffic that are
+//	processed by IEEE 802.1 bridges or related structures.
+//
+//	Relationships
+//		An instance of this ME may be linked to an instance of a MAC bridge service profile or an IEEE
+//		802.1p mapper.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Parent Me Pointer
+//			Parent ME pointer: This attribute points to an instance of a ME. The type of ME is determined by
+//			the TP type attribute. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tp Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Upstream Unicast Flood Rate Pointer
+//			Upstream unicast flood rate pointer: This attribute points to an instance of the traffic
+//			descriptor that governs the rate of upstream unicast packets whose DA is unknown to the bridge.
+//			A null pointer specifies that no administrative limit is to be imposed. (R, W, setbycreate)
+//			(optional) (2 bytes)
+//
+//		Upstream Broadcast Rate Pointer
+//			Upstream broadcast rate pointer: This attribute points to an instance of the traffic descriptor
+//			that governs the rate of upstream broadcast packets. A null pointer specifies that no
+//			administrative limit is to be imposed. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Upstream Multicast Payload Rate Pointer
+//			Upstream multicast payload rate pointer: This attribute points to an instance of the traffic
+//			descriptor that governs the rate of upstream multicast payload packets. A null pointer specifies
+//			that no administrative limit is to be imposed. (R, W, setbycreate) (optional) (2 bytes)
+//
+type Dot1RateLimiter struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1ratelimiterBME = &ManagedEntityDefinition{
+		Name:    "Dot1RateLimiter",
+		ClassID: 298,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("ParentMePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("TpType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("UpstreamUnicastFloodRatePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4: Uint16Field("UpstreamBroadcastRatePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5: Uint16Field("UpstreamMulticastPayloadRatePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewDot1RateLimiter (class ID 298) creates the basic Managed Entity
+// definition that is used to validate an ME of this type that is received
+// from, or is about to be sent on, the wire.
+func NewDot1RateLimiter(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1ratelimiterBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1xconfigurationprofile.go b/vendor/github.com/cboling/omci/generated/dot1xconfigurationprofile.go
new file mode 100644
index 0000000..361293a
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1xconfigurationprofile.go
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1XConfigurationProfileClassId ClassID = ClassID(291)
+
+var dot1xconfigurationprofileBME *ManagedEntityDefinition
+
+// Dot1XConfigurationProfile (class ID #291)
+//	An instance of this ME represents a set of attributes that control an ONU's 802.1X operation
+//	with regard to IEEE 802 services. An instance of this ME is created by the ONU if it is capable
+//	of supporting [IEEE 802.1X] authentication of CPE.
+//
+//	Relationships
+//		One instance of this ME governs the ONU's 802.1X CPE authentication behaviour.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute provides a unique number for each instance of this ME. There
+//			is at most one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Circuit Id Prefix
+//			Circuit ID prefix: This attribute is a pointer to a large string ME whose content appears as the
+//			prefix of the NAS port ID in radius access-request messages. The remainder of the NAS port ID
+//			field is local information (for example, slot-port, appended by the ONU itself). The default
+//			value of this attribute is the null pointer 0. (R, W) (mandatory) (2 bytes)
+//
+//		Fallback Policy
+//			Fallback policy: When set to 1 (deny), this attribute causes IEEE 802.1X conversations to fail
+//			when no external authentication server is accessible, such that no Ethernet service is provided.
+//			The default value 0 causes IEEE 802.1X conversations to succeed when no external authentication
+//			server is accessible. (R, W) (mandatory) (1 byte)
+//
+//		Auth Server 1
+//			Auth server 1: This attribute is a pointer to a large string ME that contains the URI of the
+//			first choice radius authentication server. The value 0 indicates that no radius authentication
+//			server is specified. (R, W) (mandatory) (2 bytes)
+//
+//		Shared Secret Auth1
+//			Shared secret auth1: This attribute is the shared secret for the first radius authentication
+//			server. It is a null-terminated character string. (R, W) (mandatory) (25 bytes)
+//
+//		Auth Server 2
+//			Auth server 2:	(R, W) (optional) (2 bytes)
+//
+//		Shared Secret Auth2
+//			Shared secret auth2:	(R, W) (optional) (25 bytes)
+//
+//		Auth Server 3
+//			Auth server 3:	(R, W) (optional) (2 bytes)
+//
+//		Shared Secret Auth3
+//			Shared secret auth3:	(R, W) (optional) (25 bytes)
+//
+//		Olt Proxy Address
+//			OLT proxy address: This attribute indicates the IP address of a possible proxy at the OLT for
+//			IEEE 802.1X radius messages. The default value 0.0.0.0 indicates that no proxy is required.
+//			(R, W) (optional) (4 bytes)
+//
+//		Calling Station Id Format
+//			Other values are reserved.
+//
+type Dot1XConfigurationProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1xconfigurationprofileBME = &ManagedEntityDefinition{
+		Name:    "Dot1XConfigurationProfile",
+		ClassID: 291,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  Uint16Field("CircuitIdPrefix", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("FallbackPolicy", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  Uint16Field("AuthServer1", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  MultiByteField("SharedSecretAuth1", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  Uint16Field("AuthServer2", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  MultiByteField("SharedSecretAuth2", 25, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  Uint16Field("AuthServer3", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  MultiByteField("SharedSecretAuth3", 25, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  Uint32Field("OltProxyAddress", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: Uint16Field("CallingStationIdFormat", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+		},
+	}
+}
+
+// NewDot1XConfigurationProfile (class ID 291) creates the basic Managed
+// Entity definition that is used to validate an ME of this type that is
+// received from, or is about to be sent on, the wire.
+func NewDot1XConfigurationProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1xconfigurationprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/dot1xperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/dot1xperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..18d4bd6
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1xperformancemonitoringhistorydata.go
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1XPerformanceMonitoringHistoryDataClassId ClassID = ClassID(292)
+
+var dot1xperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// Dot1XPerformanceMonitoringHistoryData (class ID #292)
+//	This ME collects performance statistics on an ONU's IEEE 802.1X CPE authentication operation.
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME may be associated with each UNI that can perform IEEE 802.1X
+//		authentication of CPE.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of a PPTP. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Eapol Frames Received
+//			EAPOL frames received: This attribute counts received valid EAPOL frames of any type. (R)
+//			(mandatory) (4 bytes)
+//
+//		Eapol Frames Transmitted
+//			EAPOL frames transmitted: This attribute counts transmitted EAPOL frames of any type. (R)
+//			(mandatory) (4 bytes)
+//
+//		Eapol Start Frames Received
+//			EAPOL start frames received: This attribute counts received EAPOL start frames. (R) (mandatory)
+//			(4 bytes)
+//
+//		Eapol Logoff Frames Received
+//			EAPOL logoff frames received: This attribute counts received EAPOL logoff frames. (R)
+//			(mandatory) (4 bytes)
+//
+//		Invalid Eapol Frames Received
+//			Invalid EAPOL frames received: This attribute counts received EAPOL frames in which the frame
+//			type was not recognized. (R) (mandatory) (4 bytes)
+//
+//		Eap Resp_Id Frames Received
+//			EAP resp/id frames received: This attribute counts received EAP response frames containing an
+//			identifier type field. (R) (mandatory) (4 bytes)
+//
+//		Eap Response Frames Received
+//			EAP response frames received: This attribute counts received EAP response frames, other than
+//			resp/id frames. (R) (mandatory) (4 bytes)
+//
+//		Eap Initial Request Frames Transmitted
+//			EAP initial request frames transmitted: This attribute counts transmitted request frames
+//			containing an identifier type field. In [IEEE 802.1X], this is also called ReqId. (R)
+//			(mandatory) (4 bytes)
+//
+//		Eap Request Frames Transmitted
+//			EAP request frames transmitted: This attribute counts transmitted request frames, other than
+//			request/id frames. (R) (mandatory) (4 bytes)
+//
+//		Eap Length Error Frames Received
+//			EAP length error frames received: This attribute counts received EAPOL frames whose packet body
+//			length field was invalid. (R) (mandatory) (4 bytes)
+//
+//		Eap Success Frames Generated Autonomously
+//			EAP success frames generated autonomously: This attribute counts EAPOL success frames generated
+//			according to the local fallback policy because no radius server was available. (R) (mandatory)
+//			(4 bytes)
+//
+//		Eap Failure Frames Generated Autonomously
+//			EAP failure frames generated autonomously: This attribute counts EAPOL failure frames generated
+//			according to the local fallback policy because no radius server was available. (R) (mandatory)
+//			(4 bytes)
+//
+type Dot1XPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1xperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "Dot1XPerformanceMonitoringHistoryData",
+		ClassID: 292,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("EapolFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("EapolFramesTransmitted", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("EapolStartFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("EapolLogoffFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("InvalidEapolFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("EapRespIdFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("EapResponseFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("EapInitialRequestFramesTransmitted", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("EapRequestFramesTransmitted", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("EapLengthErrorFramesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("EapSuccessFramesGeneratedAutonomously", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("EapFailureFramesGeneratedAutonomously", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+		},
+	}
+}
+
+// NewDot1XPerformanceMonitoringHistoryData (class ID 292) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewDot1XPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1xperformancemonitoringhistorydataBME, params...)
+}
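
The AllowedAttributeMask values in these generated definitions appear to follow a simple pattern: one bit per defined attribute, most significant bit first, so the 14 attributes above yield 0xFFFC. A minimal, self-contained sketch of that relationship (illustrative only, not part of the generated package):

package main

import "fmt"

// allowedAttributeMask returns a 16-bit mask with the top n bits set,
// matching the pattern seen in these generated definitions (14 attributes
// -> 0xFFFC, 12 -> 0xFFF0, 2 -> 0xC000). Illustrative sketch only.
func allowedAttributeMask(n uint) uint16 {
	if n == 0 || n > 16 {
		return 0
	}
	var mask uint16 = 0xFFFF
	return mask << (16 - n)
}

func main() {
	fmt.Printf("0x%04X 0x%04X 0x%04X\n",
		allowedAttributeMask(14), allowedAttributeMask(12), allowedAttributeMask(2))
}
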
diff --git a/vendor/github.com/cboling/omci/generated/dot1xportextensionpackage.go b/vendor/github.com/cboling/omci/generated/dot1xportextensionpackage.go
new file mode 100644
index 0000000..7ff0491
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/dot1xportextensionpackage.go
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Dot1XPortExtensionPackageClassId ClassID = ClassID(290)
+
+var dot1xportextensionpackageBME *ManagedEntityDefinition
+
+// Dot1XPortExtensionPackage (class ID #290)
+//	An instance of this ME represents a set of attributes that control a port's IEEE 802.1X
+//	operation. It is created and deleted autonomously by the ONU upon the creation or deletion of a
+//	PPTP that supports [IEEE 802.1X] authentication of customer premises equipment (CPE).
+//
+//	Relationships
+//		An instance of this ME is associated with a PPTP that performs IEEE 802.1X authentication of CPE
+//		(e.g., Ethernet or DSL).
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute provides a unique number for each instance of this ME. Its
+//			value is the same as that of its associated PPTP (i.e., slot and port number). (R) (mandatory)
+//			(2 bytes)
+//
+//		Dot1X Enable
+//			Dot1x enable: If true, this Boolean attribute forces the associated port to authenticate via
+//			[IEEE 802.1X] as a precondition of normal service. The default value false does not impose IEEE
+//			802.1X authentication on the associated port. (R, W) (mandatory) (1 byte)
+//
+//		Action Register
+//			(W) (mandatory) (1 byte)
+//
+//		Authenticator Pae State
+//			(R) (optional) (1 byte)
+//
+//		Backend Authentication State
+//			(R) (optional) (1 byte)
+//
+//		Admin Controlled Directions
+//			Admin controlled directions: This attribute controls the directionality of the port's
+//			authentication requirement. The default value 0 indicates that control is imposed in both
+//			directions. The value 1 indicates that control is imposed only on traffic from the subscriber
+//			towards the network. (R, W) (optional) (1 byte)
+//
+//		Operational Controlled Directions
+//			Operational controlled directions: This attribute indicates the directionality of the port's
+//			current authentication state. The value 0 indicates that control is imposed in both directions.
+//			The value 1 indicates that control is imposed only on traffic from the subscriber towards the
+//			network. (R) (optional) (1 byte)
+//
+//		Authenticator Controlled Port Status
+//			Authenticator controlled port status: This attribute indicates whether the controlled port is
+//			currently authorized (1) or unauthorized (2). (R) (optional) (1 byte)
+//
+//		Quiet Period
+//			Quiet period: This attribute specifies the interval between EAP request/identity invitations
+//			sent to the peer. Other events such as carrier present or EAPOL start frames from the peer may
+//			trigger an EAP request/identity frame from the ONU at any time; this attribute controls the
+//			ONU's periodic behaviour in the absence of these other inputs. It is expressed in seconds.
+//			(R, W) (optional) (2 bytes)
+//
+//		Server Timeout Period
+//			Server timeout period: This attribute specifies the time the ONU will wait for a response from
+//			the radius server before timing out. Within this maximum interval, the ONU may initiate several
+//			retransmissions with exponentially increasing delay. Upon timeout, the ONU may try another
+//			radius server if there is one, or invoke the fallback policy, if no alternate radius servers are
+//			available. Server timeout is expressed in seconds, with a default value of 30 and a maximum
+//			value of 65535. (R, W) (optional) (2 bytes)
+//
+//		Re_Authentication Period
+//			Re-authentication period: This attribute records the re-authentication interval specified by the
+//			radius authentication server. It is expressed in seconds. The attribute is only meaningful after
+//			a port has been authenticated. (R) (optional) (2 bytes)
+//
+//		Re_Authentication Enabled
+//			Re-authentication enabled: This Boolean attribute records whether the radius authentication
+//			server has enabled re-authentication on this service (true) or not (false). The attribute is
+//			only meaningful after a port has been authenticated. (R) (optional) (1 byte)
+//
+//		Key Transmission Enabled
+//			Key transmission enabled: This Boolean attribute indicates whether key transmission is enabled
+//			(true) or not (false). This feature is not required; the parameter is listed here for
+//			completeness vis-à-vis [IEEE 802.1X]. (R, W) (optional) (1 byte)
+//
+type Dot1XPortExtensionPackage struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	dot1xportextensionpackageBME = &ManagedEntityDefinition{
+		Name:    "Dot1XPortExtensionPackage",
+		ClassID: 290,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("Dot1XEnable", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("ActionRegister", 0, mapset.NewSetWith(Write), false, false, false, false, 2),
+			3:  ByteField("AuthenticatorPaeState", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  ByteField("BackendAuthenticationState", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5:  ByteField("AdminControlledDirections", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  ByteField("OperationalControlledDirections", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7:  ByteField("AuthenticatorControlledPortStatus", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8:  Uint16Field("QuietPeriod", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  Uint16Field("ServerTimeoutPeriod", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: Uint16Field("ReAuthenticationPeriod", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: ByteField("ReAuthenticationEnabled", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: ByteField("KeyTransmissionEnabled", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewDot1XPortExtensionPackage (class ID 290) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewDot1XPortExtensionPackage(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(dot1xportextensionpackageBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..0659555
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydata.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingGroupPerformanceMonitoringHistoryDataClassId ClassID = ClassID(421)
+
+var efmbondinggroupperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// EfmBondingGroupPerformanceMonitoringHistoryData (class ID #421)
+//	This ME collects PM data as seen at the xTU-C. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the EFM bonding group. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rx Bad Fragments
+//			Rx bad fragments: Clause 45.2.3.33 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Lost Fragments
+//			Rx lost fragments: Clause 45.2.3.34 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Lost Starts
+//			Rx lost starts: Clause 45.2.3.35 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Lost Ends
+//			Rx lost ends: Clause 45.2.3.36 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Frames
+//			Rx frames: Number of Ethernet frames received over this group. (R) (mandatory) (4 bytes)
+//
+//		Tx Frames
+//			Tx frames: Number of Ethernet frames transmitted over this group. (R) (mandatory) (4 bytes)
+//
+//		Rx Bytes
+//			Rx bytes: Number of bytes contained in the Ethernet frames received over this group. (R)
+//			(mandatory) (8 bytes)
+//
+//		Tx Bytes
+//			Tx bytes: Number of bytes contained in the Ethernet frames transmitted over this group. (R)
+//			(mandatory) (8 bytes)
+//
+//		Tx Discarded Frames
+//			Tx discarded frames: Number of Ethernet frames discarded by the group transmit function. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Discarded Bytes
+//			Tx discarded bytes: Number of bytes contained in the Ethernet frames discarded by the group
+//			transmit function. (R) (mandatory) (4 bytes)
+//
+type EfmBondingGroupPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondinggroupperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "EfmBondingGroupPerformanceMonitoringHistoryData",
+		ClassID: 421,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("RxBadFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("RxLostFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("RxLostStarts", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("RxLostEnds", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("RxFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TxFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint64Field("RxBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint64Field("TxBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("TxDiscardedFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TxDiscardedBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+		},
+	}
+}
+
+// NewEfmBondingGroupPerformanceMonitoringHistoryData (class ID 421) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingGroupPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondinggroupperformancemonitoringhistorydataBME, params...)
+}
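
Note that the frame counters in this ME are 4-byte values (Uint32Field) while the Rx/Tx byte counters are 8-byte values (Uint64Field), matching the sizes given in the attribute descriptions. A self-contained sketch of decoding such fixed-width counters, assuming network (big-endian) byte order; this is illustrative only, not the decode path used by this package:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Illustrative only: decode a 4-byte frame counter followed by an
	// 8-byte byte counter from a raw attribute payload.
	payload := []byte{
		0x00, 0x00, 0x01, 0x2C, // 4-byte counter = 300
		0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0D, 0x40, // 8-byte counter = 200000
	}
	rxFrames := binary.BigEndian.Uint32(payload[0:4])
	rxBytes := binary.BigEndian.Uint64(payload[4:12])
	fmt.Println(rxFrames, rxBytes)
}
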
diff --git a/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydatapart2.go b/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydatapart2.go
new file mode 100644
index 0000000..eddbf2c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondinggroupperformancemonitoringhistorydatapart2.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingGroupPerformanceMonitoringHistoryDataPart2ClassId ClassID = ClassID(422)
+
+var efmbondinggroupperformancemonitoringhistorydatapart2BME *ManagedEntityDefinition
+
+// EfmBondingGroupPerformanceMonitoringHistoryDataPart2 (class ID #422)
+//	This ME collects PM data as seen at the xTU-C. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the EFM bonding group. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rx Unicast Frames
+//			Rx unicast frames: Number of unicast Ethernet frames received over this group. (R) (mandatory)
+//			(4 bytes)
+//
+//		Tx Unicast Frames
+//			Tx unicast frames: Number of unicast Ethernet frames transmitted over this group. (R)
+//			(mandatory) (4 bytes)
+//
+//		Rx Unicast Bytes
+//			Rx unicast bytes: Number of bytes contained in the unicast Ethernet frames received over this
+//			group. (R) (mandatory) (8 bytes)
+//
+//		Tx Unicast Bytes
+//			Tx unicast bytes: Number of bytes contained in the unicast Ethernet frames transmitted over this
+//			group. (R) (mandatory) (8 bytes)
+//
+//		Rx Broadcast Frames
+//			Rx broadcast frames: Number of broadcast Ethernet frames received over this group. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Broadcast Frames
+//			Tx broadcast frames: Number of broadcast Ethernet frames transmitted over this group. (R)
+//			(mandatory) (4 bytes)
+//
+//		Rx Broadcast Bytes
+//			Rx broadcast bytes: Number of bytes contained in the broadcast Ethernet frames received over
+//			this group. (R) (mandatory) (8 bytes)
+//
+//		Tx Broadcast Bytes
+//			Tx broadcast bytes: Number of bytes contained in the broadcast Ethernet frames transmitted over
+//			this group. (R) (mandatory) (8 bytes)
+//
+//		Rx Multicast Frames
+//			Rx multicast frames: Number of multicast Ethernet frames received over this group. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Multicast Frames
+//			Tx multicast frames: Number of multicast Ethernet frames transmitted over this group. (R)
+//			(mandatory) (4 bytes)
+//
+//		Rx Multicast Bytes
+//			Rx multicast bytes: Number of bytes contained in the multicast Ethernet frames received over
+//			this group. (R) (mandatory) (8 bytes)
+//
+//		Tx Multicast Bytes
+//			Tx multicast bytes: Number of bytes contained in the multicast Ethernet frames transmitted over
+//			this group. (R) (mandatory) (8 bytes)
+//
+type EfmBondingGroupPerformanceMonitoringHistoryDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondinggroupperformancemonitoringhistorydatapart2BME = &ManagedEntityDefinition{
+		Name:    "EfmBondingGroupPerformanceMonitoringHistoryDataPart2",
+		ClassID: 422,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("RxUnicastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TxUnicastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint64Field("RxUnicastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint64Field("TxUnicastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("RxBroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TxBroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint64Field("RxBroadcastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint64Field("TxBroadcastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("RxMulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TxMulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint64Field("RxMulticastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint64Field("TxMulticastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+		},
+	}
+}
+
+// NewEfmBondingGroupPerformanceMonitoringHistoryDataPart2 (class ID 422) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingGroupPerformanceMonitoringHistoryDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondinggroupperformancemonitoringhistorydatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/efmbondinglink.go b/vendor/github.com/cboling/omci/generated/efmbondinglink.go
new file mode 100644
index 0000000..5ec16b5
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondinglink.go
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingLinkClassId ClassID = ClassID(420)
+
+var efmbondinglinkBME *ManagedEntityDefinition
+
+// EfmBondingLink (class ID #420)
+//	The EFM bonding link represents a link that can be bonded with other links to form a group. In
+//	[IEEE 802.3], a bonding group is known as a PAF and a link is known as a PME. Instances of this
+//	ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or one instance of an EFM bonding group.
+//
+//	Attributes
+//		Managed Entity Id
+//			NOTE – This attribute has the same meaning as the Stream ID in clause C.3.1.2 of [ITU-T
+//			G.998.2], except that it cannot be changed. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Associated Group Me Id
+//			Associated group ME ID: This attribute is the ME ID of the bonding group to which this link is
+//			associated. Changing this attribute moves the link from one group to another. Setting this
+//			attribute to an ME ID that has not yet been provisioned will result in this link being placed in
+//			a single-link group that contains only this link. The default value for this attribute is the
+//			null pointer, 0xFFFF. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Link Alarm Enable
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type EfmBondingLink struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondinglinkBME = &ManagedEntityDefinition{
+		Name:    "EfmBondingLink",
+		ClassID: 420,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("AssociatedGroupMeId", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("LinkAlarmEnable", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+		},
+	}
+}
+
+// NewEfmBondingLink (class ID 420) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingLink(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondinglinkBME, params...)
+}
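
A minimal sketch of how one of these generated constructors might be called; whether a non-nil OmciErrors indicates failure, and what fields ParamData exposes, are assumptions about this package's API, not confirmed by this change:

package main

import (
	"fmt"

	me "github.com/cboling/omci/generated"
)

func main() {
	// Hypothetical usage sketch: build the EFM bonding link ME definition.
	entity, omciErr := me.NewEfmBondingLink()
	if omciErr != nil {
		fmt.Println("constructor reported:", omciErr)
	}
	_ = entity // further inspection depends on the ManagedEntity API (not shown)
}
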
diff --git a/vendor/github.com/cboling/omci/generated/efmbondinglinkperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/efmbondinglinkperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..f5963d4
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondinglinkperformancemonitoringhistorydata.go
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingLinkPerformanceMonitoringHistoryDataClassId ClassID = ClassID(423)
+
+var efmbondinglinkperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// EfmBondingLinkPerformanceMonitoringHistoryData (class ID #423)
+//	This ME collects PM data as seen at the xTU-C. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the EFM bonding link. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rx Errored Fragments
+//			Rx errored fragments: Clause 45.2.3.29 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Small Fragments
+//			Rx small fragments: Clause 45.2.3.30 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Large Fragments
+//			Rx large fragments: Clause 45.2.3.31 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Discarded Fragments
+//			Rx discarded fragments: Clause 45.2.3.32 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Fcs Errors
+//			Rx FCS errors: Clause 45.2.6.11 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Coding Errors
+//			Rx coding errors: Clause 45.2.6.12 of [IEEE 802.3]. (R) (mandatory) (4 bytes)
+//
+//		Rx Fragments
+//			Rx fragments: Number of fragments received over this link. (R) (mandatory) (4 bytes)
+//
+//		Tx Fragments
+//			Tx fragments: Number of fragments transmitted over this link. (R) (mandatory) (4 bytes)
+//
+type EfmBondingLinkPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondinglinkperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "EfmBondingLinkPerformanceMonitoringHistoryData",
+		ClassID: 423,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("RxErroredFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("RxSmallFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("RxLargeFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("RxDiscardedFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("RxFcsErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("RxCodingErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("RxFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TxFragments", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+		},
+	}
+}
+
+// NewEfmBondingLinkPerformanceMonitoringHistoryData (class ID 423) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingLinkPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondinglinkperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..a857217
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydata.go
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingPortPerformanceMonitoringHistoryDataClassId ClassID = ClassID(424)
+
+var efmbondingportperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// EfmBondingPortPerformanceMonitoringHistoryData (class ID #424)
+//	This ME collects PM data as seen at the xTU-C. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rx Frames
+//			Rx frames: Number of Ethernet frames received over this port. (R) (mandatory) (4 bytes)
+//
+//		Tx Frames
+//			Tx frames: Number of Ethernet frames transmitted over this port. (R) (mandatory) (4 bytes)
+//
+//		Rx Bytes
+//			Rx bytes: Number of bytes contained in the Ethernet frames received over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Bytes
+//			Tx bytes: Number of bytes contained in the Ethernet frames transmitted over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Discarded Frames
+//			Tx discarded frames: Number of Ethernet frames discarded by the port transmit function. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Discarded Bytes
+//			Tx discarded bytes: Number of bytes contained in the Ethernet frames discarded by the port
+//			transmit function. (R) (mandatory) (4 bytes)
+//
+type EfmBondingPortPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondingportperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "EfmBondingPortPerformanceMonitoringHistoryData",
+		ClassID: 424,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("RxFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("TxFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("RxBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("TxBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("TxDiscardedFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint32Field("TxDiscardedBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewEfmBondingPortPerformanceMonitoringHistoryData (class ID 424) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingPortPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondingportperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydatapart2.go b/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydatapart2.go
new file mode 100644
index 0000000..cc3dd54
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/efmbondingportperformancemonitoringhistorydatapart2.go
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EfmBondingPortPerformanceMonitoringHistoryDataPart2ClassId ClassID = ClassID(425)
+
+var efmbondingportperformancemonitoringhistorydatapart2BME *ManagedEntityDefinition
+
+// EfmBondingPortPerformanceMonitoringHistoryDataPart2 (class ID #425)
+//	This ME collects PM data as seen at the xTU-C. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rx Unicast Frames
+//			Rx unicast frames: Number of unicast Ethernet frames received over this port. (R) (mandatory)
+//			(4 bytes)
+//
+//		Tx Unicast Frames
+//			Tx unicast frames: Number of unicast Ethernet frames transmitted over this port. (R) (mandatory)
+//			(4 bytes)
+//
+//		Rx Unicast Bytes
+//			Rx unicast bytes: Number of bytes contained in the unicast Ethernet frames received over this
+//			port. (R) (mandatory) (4 bytes)
+//
+//		Tx Unicast Bytes
+//			Tx unicast bytes: Number of bytes contained in the unicast Ethernet frames transmitted over this
+//			port. (R) (mandatory) (4 bytes)
+//
+//		Rx Broadcast Frames
+//			Rx broadcast frames: Number of broadcast Ethernet frames received over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Broadcast Frames
+//			Tx broadcast frames: Number of broadcast Ethernet frames transmitted over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Rx Broadcast Bytes
+//			Rx broadcast bytes: Number of bytes contained in the broadcast Ethernet frames received over
+//			this port. (R) (mandatory) (4 bytes)
+//
+//		Tx Broadcast Bytes
+//			Tx broadcast bytes: Number of bytes contained in the broadcast Ethernet frames transmitted over
+//			this port. (R) (mandatory) (4 bytes)
+//
+//		Rx Multicast Frames
+//			Rx multicast frames: Number of multicast Ethernet frames received over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Tx Multicast Frames
+//			Tx multicast frames: Number of multicast Ethernet frames transmitted over this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Rx Multicast Bytes
+//			Rx multicast bytes: Number of bytes contained in the multicast Ethernet frames received over
+//			this port. (R) (mandatory) (4 bytes)
+//
+//		Tx Multicast Bytes
+//			Tx multicast bytes: Number of bytes contained in the multicast Ethernet frames transmitted over
+//			this port. (R) (mandatory) (4 bytes)
+//
+type EfmBondingPortPerformanceMonitoringHistoryDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	efmbondingportperformancemonitoringhistorydatapart2BME = &ManagedEntityDefinition{
+		Name:    "EfmBondingPortPerformanceMonitoringHistoryDataPart2",
+		ClassID: 425,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("RxUnicastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TxUnicastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("RxUnicastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("TxUnicastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("RxBroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TxBroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("RxBroadcastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TxBroadcastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("RxMulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TxMulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("RxMulticastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("TxMulticastBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+		},
+	}
+}
+
+// NewEfmBondingPortPerformanceMonitoringHistoryDataPart2 (class ID 425) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEfmBondingPortPerformanceMonitoringHistoryDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(efmbondingportperformancemonitoringhistorydatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/energyconsumptionperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/energyconsumptionperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..26b0828
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/energyconsumptionperformancemonitoringhistorydata.go
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EnergyConsumptionPerformanceMonitoringHistoryDataClassId ClassID = ClassID(343)
+
+var energyconsumptionperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// EnergyConsumptionPerformanceMonitoringHistoryData (class ID #343)
+//	This ME collects PM data associated with the ONU's energy consumption. The time spent in various
+//	low-power states is recorded as a measure of their utility. Furthermore, the ONU may also
+//	include the equivalent of a watt-hour meter, which can be sampled from time to time to measure
+//	actual power consumed.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with the ONU in its entirety.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The ME ID must
+//			be 0. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: No thresholds are defined for this ME. For uniformity with other PMs, the
+//			attribute is retained and shown as mandatory, but it should be set to a null pointer. (R, W,
+//			set-by-create) (mandatory) (2 bytes)
+//
+//		Doze Time
+//			Doze time: This attribute records the time during which the ONU was in doze energy conservation
+//			mode, measured in microseconds. If watchful sleep is enabled in the ONU dynamic power management
+//			control ME, the ONU ignores this attribute. (R) (mandatory) (4 bytes)
+//
+//		Cyclic Sleep Time
+//			Cyclic sleep time: This attribute records the time during which the ONU was in cyclic sleep
+//			energy conservation mode, measured in microseconds. If watchful sleep is enabled in the ONU
+//			dynamic power management control ME, the ONU ignores this attribute. (R) (mandatory) (4 bytes)
+//
+//		Watchful Sleep Time
+//			Watchful sleep time: This attribute records the time during which the ONU was in watchful sleep
+//			energy conservation mode, measured in microseconds. (R) (mandatory) (4 bytes)
+//
+//		Energy Consumed
+//			Energy consumed: This attribute records the energy consumed by the ONU, measured in millijoules.
+//			(R) (optional) (4 bytes)
+//
+type EnergyConsumptionPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	energyconsumptionperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "EnergyConsumptionPerformanceMonitoringHistoryData",
+		ClassID: 343,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("DozeTime", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("CyclicSleepTime", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("WatchfulSleepTime", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("EnergyConsumed", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+		},
+	}
+}
+
+// NewEnergyConsumptionPerformanceMonitoringHistoryData (class ID 343) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEnergyConsumptionPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(energyconsumptionperformancemonitoringhistorydataBME, params...)
+}
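
The sleep-time counters in this ME are expressed in microseconds over a 15-min interval and the energy counter in millijoules, so simple derived figures such as a low-power duty cycle or an average power draw follow directly from the units above. A small sketch with made-up sample values, for illustration only:

package main

import "fmt"

func main() {
	// Illustrative arithmetic only: counters are per 15-min (900 s)
	// interval, sleep times in microseconds, energy in millijoules.
	const intervalMicroseconds = 900 * 1_000_000

	dozeTime := uint32(120_000_000)     // 120 s in doze mode (example value)
	cyclicSleep := uint32(60_000_000)   // 60 s in cyclic sleep (example value)
	energyConsumed := uint32(4_500_000) // 4500 J expressed in millijoules (example value)

	lowPowerFraction := float64(dozeTime+cyclicSleep) / float64(intervalMicroseconds)
	averagePowerWatts := float64(energyConsumed) / 1000.0 / 900.0

	fmt.Printf("low-power fraction: %.0f%%\n", lowPowerFraction*100) // 20%
	fmt.Printf("average power: %.1f W\n", averagePowerWatts)         // 5.0 W
}
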
diff --git a/vendor/github.com/cboling/omci/generated/enhancedsecuritycontrol.go b/vendor/github.com/cboling/omci/generated/enhancedsecuritycontrol.go
new file mode 100644
index 0000000..bc8246c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/enhancedsecuritycontrol.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EnhancedSecurityControlClassId ClassID = ClassID(332)
+
+var enhancedsecuritycontrolBME *ManagedEntityDefinition
+
+// EnhancedSecurityControl (class ID #332)
+//	This ME contains the capabilities, parameters and controls of enhanced GPON security features
+//	when they are negotiated via the OMCI (Note). The attributes in this ME are intended to be used
+//	to implement a symmetric-key-based three step authentication process as described in the
+//	supplemental information section in the following.
+//
+//	NOTE – If an ITU-T G.987 system uses 802.1X authentication as defined in [ITU-T G.987.3], the
+//	only applicable attribute of this ME is the broadcast key table.
+//
+//	Relationships
+//		One instance of this ME is associated with the ONU ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Olt Crypto Capabilities
+//			(W) (mandatory) (16 bytes)
+//
+//		Olt Random Challenge Table
+//			NOTE – It is assumed that the length of OLT_challenge is always an integer multiple of 16 bytes.
+//
+//		Olt Challenge Status
+//			The ONU initializes this attribute to the value false. (R, W) (mandatory) (1 byte)
+//
+//		Onu Selected Crypto Capabilities
+//			ONU selected crypto capabilities: This attribute specifies the cryptographic capability selected
+//			by the ONU in authentication step 2. Its value specifies one of the bit positions that has the
+//			value 1 in the OLT crypto capabilities attribute. (R) (mandatory) (1 byte)
+//
+//		Onu Random Challenge Table
+//			ONU random challenge table: This attribute specifies the random challenge ONU_challenge issued
+//			by the ONU during authentication step 2. It is structured as a table, with each entry being
+//			16 bytes of content. ONU_challenge is the concatenation of all 16 byte content fields in the
+//			table. Once the OLT triggers a response to be generated using the OLT challenge status
+//			attribute, the ONU generates the response and writes the table (in a single operation). The AVC
+//			generated by this attribute signals to the OLT that the challenge is ready, so that the OLT can
+//			commence a get/get-next sequence to obtain the table's contents. (R) (mandatory) (16 * P bytes)
+//
+//		Onu Authentication Result Table
+//			Once the OLT triggers a response to be generated using the OLT challenge status attribute, the
+//			ONU generates ONU_result and writes the table (in a single operation). The AVC generated by this
+//			attribute signals to the OLT that the response is ready, so that the OLT can commence a get/get-
+//			next sequence to obtain the table's contents. (R) (mandatory) (16 * Q bytes)
+//
+//		Olt Authentication Result Table
+//			This attribute is structured as a table, with each entry being 17 bytes. The first byte is the
+//			table row number, starting at 1; the remaining 16 bytes are content. OLT_result is the
+//			concatenation of all 16 byte content fields. The OLT writes all entries into the table, and then
+//			triggers the ONU's processing of the table using the OLT result status attribute. The number of
+//			rows R is implicit in the choice of hash algorithm. The OLT can clear the table with a set
+//			operation to row 0. (W) (mandatory) (17 * R bytes)
+//
+//		Olt Result Status
+//			(R, W) (mandatory) (1 byte)
+//
+//		Onu Authentication Status
+//			(R) (mandatory) (1 byte)
+//
+//		Master Session Key Name
+//			Upon the invalidation of a master session key (e.g., due to an ONU reset or deactivation, or due
+//			to an ONU-local decision that the master session key has expired), the ONU sets the master
+//			session key name to all zeros. (R) (mandatory) (16 bytes)
+//
+//		Broadcast Key Table
+//			(R, W) (optional) (18N bytes)
+//
+//		Effective Key Length
+//			Effective key length: This attribute specifies the maximum effective length, in bits, of keys
+//			generated by the ONU. (R) (optional) (2 bytes)
+//
+type EnhancedSecurityControl struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	enhancedsecuritycontrolBME = &ManagedEntityDefinition{
+		Name:    "EnhancedSecurityControl",
+		ClassID: 332,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  MultiByteField("OltCryptoCapabilities", 16, nil, mapset.NewSetWith(Write), false, false, false, false, 1),
+			2:  TableField("OltRandomChallengeTable", TableInfo{nil, 17}, mapset.NewSetWith(Read, Write), false, false, false, 2),
+			3:  ByteField("OltChallengeStatus", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  ByteField("OnuSelectedCryptoCapabilities", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  TableField("OnuRandomChallengeTable", TableInfo{nil, 16}, mapset.NewSetWith(Read), true, false, false, 5),
+			6:  TableField("OnuAuthenticationResultTable", TableInfo{nil, 16}, mapset.NewSetWith(Read), true, false, false, 6),
+			7:  TableField("OltAuthenticationResultTable", TableInfo{nil, 17}, mapset.NewSetWith(Read, Write), false, false, false, 7),
+			8:  ByteField("OltResultStatus", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("OnuAuthenticationStatus", 0, mapset.NewSetWith(Read), true, false, false, false, 9),
+			10: MultiByteField("MasterSessionKeyName", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: TableField("BroadcastKeyTable", TableInfo{nil, 18}, mapset.NewSetWith(Read, Write), false, true, false, 11),
+			12: Uint16Field("EffectiveKeyLength", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewEnhancedSecurityControl (class ID 332) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or about to be sent on the wire.
+func NewEnhancedSecurityControl(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(enhancedsecuritycontrolBME, params...)
+}
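
Given the row layout described for the OLT authentication result table above (17-byte rows, one row-number byte followed by 16 content bytes, with OLT_result being the concatenation of the content fields in row order), reassembly is straightforward. A self-contained sketch, illustrative only and not this package's table handling:

package main

import (
	"fmt"
	"sort"
)

// assembleResult concatenates the 16-byte content fields of 17-byte table
// rows (1 row-number byte + 16 content bytes), ordered by row number.
// Illustrative sketch only.
func assembleResult(rows [][]byte) []byte {
	sorted := make([][]byte, len(rows))
	copy(sorted, rows)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i][0] < sorted[j][0] })

	var result []byte
	for _, row := range sorted {
		result = append(result, row[1:17]...)
	}
	return result
}

func main() {
	row1 := append([]byte{1}, make([]byte, 16)...)
	row2 := append([]byte{2}, make([]byte, 16)...)
	fmt.Println(len(assembleResult([][]byte{row2, row1}))) // 32
}
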
diff --git a/vendor/github.com/cboling/omci/generated/equipmentextensionpackage.go b/vendor/github.com/cboling/omci/generated/equipmentextensionpackage.go
new file mode 100644
index 0000000..2b46de7
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/equipmentextensionpackage.go
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EquipmentExtensionPackageClassId ClassID = ClassID(160)
+
+var equipmentextensionpackageBME *ManagedEntityDefinition
+
+// EquipmentExtensionPackage (class ID #160)
+//	This ME supports optional extensions to circuit pack MEs. If the circuit pack supports these
+//	features, the ONU creates and deletes this ME along with its associated real or virtual circuit
+//	pack.
+//
+//	Relationships
+//		An equipment extension package may be contained by an ONU-G or cardholder.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the ONU-G or cardholder. (R)
+//			(mandatory) (2 bytes)
+//
+//		Environmental Sense
+//			NOTE – Some specific sense point applications are already defined on the ONU-G ME. It is the
+//			vendor's choice how to configure and report sense points that appear both generically and
+//			specifically.
+//
+//		Contact Closure Output
+//			On read, the left bit in each pair should be set to 0 at the ONU and ignored at the OLT. The
+//			right bit indicates a released output point with 0 and an operated contact point with 1. (R, W)
+//			(optional) (2 bytes)
+//
+type EquipmentExtensionPackage struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	equipmentextensionpackageBME = &ManagedEntityDefinition{
+		Name:    "EquipmentExtensionPackage",
+		ClassID: 160,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("EnvironmentalSense", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 1),
+			2: Uint16Field("ContactClosureOutput", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 2),
+		},
+	}
+}
+
+// NewEquipmentExtensionPackage (class ID 160) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEquipmentExtensionPackage(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(equipmentextensionpackageBME, params...)
+}
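For orientation, here is a minimal usage sketch of one of these generated constructors. The ParamData field names (EntityID, Attributes) and the treatment of OmciErrors as a plain error indicator are assumptions not shown in this diff; this is an illustrative sketch, not the package's documented contract.

package main

import (
	"fmt"

	me "github.com/cboling/omci/generated"
)

func main() {
	// Assumed ParamData layout: an entity ID plus initial attribute values
	// keyed by the generated attribute names.
	params := me.ParamData{
		EntityID: 1,
		Attributes: me.AttributeValueMap{
			"EnvironmentalSense": uint16(0),
		},
	}

	// NewEquipmentExtensionPackage validates the parameters against the
	// generated definition (class ID 160) and returns the resulting ME.
	entity, omciErr := me.NewEquipmentExtensionPackage(params)

	// How OmciErrors signals success is assumed here; it may expose a
	// status code rather than behaving like a nil-able error.
	fmt.Println(entity, omciErr)
}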
diff --git a/vendor/github.com/cboling/omci/generated/ethernetflowterminationpoint.go b/vendor/github.com/cboling/omci/generated/ethernetflowterminationpoint.go
new file mode 100644
index 0000000..0e4454a
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetflowterminationpoint.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetFlowTerminationPointClassId ClassID = ClassID(286)
+
+var ethernetflowterminationpointBME *ManagedEntityDefinition
+
+// EthernetFlowTerminationPoint (class ID #286)
+//	The Ethernet flow TP contains the attributes necessary to originate and terminate Ethernet
+//	frames in the ONU. It is appropriate when transporting pseudowire services via layer 2.
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		One Ethernet flow TP ME exists for each distinct pseudowire service that is transported via
+//		layer 2.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to a pseudowire TP ME. (R, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Destination Mac
+//			Destination MAC: This attribute specifies the destination MAC address of upstream Ethernet
+//			frames. (R, W, setbycreate) (mandatory) (6 bytes)
+//
+//		Source Mac
+//			Source MAC: This attribute specifies the near-end MAC address. It is established by nonOMCI
+//			means (e.g., factory programmed into ONU flash memory) and is included here for information
+//			only. (R) (mandatory) (6 bytes)
+//
+//		Tag Policy
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tci
+//			TCI:	If the tag policy calls for tagging of upstream Ethernet frames, this attribute specifies
+//			the tag control information, which includes the VLAN tag, P bits and CFI bit. (R, W) (optional)
+//			(2 bytes)
+//
+//		Loopback
+//			(R, W) (mandatory) (1 byte)
+//
+type EthernetFlowTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetflowterminationpointBME = &ManagedEntityDefinition{
+		Name:    "EthernetFlowTerminationPoint",
+		ClassID: 286,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: MultiByteField("DestinationMac", 6, nil, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: MultiByteField("SourceMac", 6, nil, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: ByteField("TagPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("Tci", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: ByteField("Loopback", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewEthernetFlowTerminationPoint (class ID 286) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetFlowTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetflowterminationpointBME, params...)
+}
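The MessageTypes sets above come from github.com/deckarep/golang-set. As a standalone illustration of how such a set answers "does this ME support a given OMCI operation?", the sketch below builds its own set using string stand-ins for the typed Create/Delete/Get/Set constants that the generated package actually uses:

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// String stand-ins for the package's typed message-type constants.
	supported := mapset.NewSetWith("Create", "Delete", "Get", "Set")

	fmt.Println(supported.Contains("Set"))       // true
	fmt.Println(supported.Contains("MibUpload")) // false
}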
diff --git a/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm.go b/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm.go
new file mode 100644
index 0000000..887f8f2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm.go
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetFrameExtendedPmClassId ClassID = ClassID(334)
+
+var ethernetframeextendedpmBME *ManagedEntityDefinition
+
+// EthernetFrameExtendedPm (class ID #334)
+//	This ME collects some of the PM data at a point where an Ethernet flow can be observed. It is
+//	based on the Etherstats group of [IETF RFC 2819]. Instances of this ME are created and deleted
+//	by the OLT. References to received frames are to be interpreted as the number of frames entering
+//	the monitoring point in the direction specified by the control block.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME may be associated with an instance of an ME at any Ethernet interface
+//		within the ONU. The specific ME is identified in the control block attribute.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. To facilitate
+//			discovery, the identification of instances sequentially starting with 1 is encouraged. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. If
+//			continuous accumulation is enabled in the control block, this attribute is not used and has the
+//			fixed value 0. (R) (mandatory) (1 byte)
+//
+//		Control Block
+//			(R, W, setbycreate) (mandatory) (16 bytes)
+//
+//		Drop Events
+//			Drop events:	The total number of events in which frames were dropped due to a lack of resources.
+//			This is not necessarily the number of frames dropped; it is the number of times this event was
+//			detected. (R) (mandatory) (4 bytes)
+//
+//		Octets
+//			Octets:	The total number of octets received, including those in bad frames, excluding framing
+//			bits, but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames
+//			Frames:	The total number of frames received, including bad frames, broadcast frames and
+//			multicast frames. (R) (mandatory) (4 bytes)
+//
+//		Broadcast Frames
+//			Broadcast frames: The total number of received good frames directed to the broadcast address.
+//			This does not include multicast frames. (R) (mandatory) (4 bytes)
+//
+//		Multicast Frames
+//			Multicast frames: The total number of received good frames directed to a multicast address. This
+//			does not include broadcast frames. (R) (mandatory) (4 bytes)
+//
+//		Crc Errored Frames
+//			CRC errored frames: The total number of frames received that had a length (excluding framing
+//			bits, but including FCS octets) of between 64 and 1518 octets, inclusive, but had either a bad
+//			FCS with an integral number of octets (FCS error) or a bad FCS with a non-integral number of
+//			octets (alignment error). (R) (mandatory) (4 bytes)
+//
+//		Undersize Frames
+//			Undersize frames: The total number of frames received that were less than 64 octets long but
+//			were otherwise well formed (excluding framing bits, but including FCS octets). (R) (mandatory)
+//			(4 bytes)
+//
+//		Oversize Frames
+//			Oversize frames: The total number of frames received that were longer than 1518 octets
+//			(excluding framing bits, but including FCS octets) and were otherwise well formed. (R)
+//			(mandatory) (4 bytes)
+//
+//		Frames 64 Octets
+//			Frames 64 octets: The total number of received frames (including bad frames) that were 64 octets
+//			long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 65 To 127 Octets
+//			Frames 65 to 127 octets: The total number of received frames (including bad frames) that were
+//			65..127 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 128 To 255 Octets
+//			Frames 128 to 255 octets: The total number of frames (including bad frames) received that were
+//			128..255 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 256 To 511 Octets
+//			Frames 256 to 511 octets: The total number of frames (including bad frames) received that were
+//			256..511 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 512 To 1 023 Octets
+//			Frames 512 to 1 023 octets: The total number of frames (including bad frames) received that were
+//			512..1 023 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 1024 To 1518 Octets
+//			Frames 1024 to 1518 octets: The total number of frames (including bad frames) received that were
+//			1024..1518 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+type EthernetFrameExtendedPm struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetframeextendedpmBME = &ManagedEntityDefinition{
+		Name:    "EthernetFrameExtendedPm",
+		ClassID: 334,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  MultiByteField("ControlBlock", 16, nil, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("DropEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("Frames", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("BroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("MulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("CrcErroredFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("UndersizeFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("OversizeFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("Frames64Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("Frames65To127Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("Frames128To255Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("Frames256To511Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("Frames512To1023Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("Frames1024To1518Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetFrameExtendedPm (class ID 334) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetFrameExtendedPm(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetframeextendedpmBME, params...)
+}
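The AllowedAttributeMask values follow the usual OMCI convention of one bit per attribute, with attribute 1 in the most significant bit of a 16-bit word; that is how 0xC000, 0xF800 and 0xFFFF line up with the two, five and sixteen numbered attributes in the definitions above. The small self-contained helper below (my reading of the generated values, not an API of this package) makes the relationship explicit:

package main

import "fmt"

// attributeMask returns the 16-bit OMCI attribute mask covering attributes
// 1..n, with attribute 1 mapped to the most significant bit.
func attributeMask(n int) uint16 {
	var mask uint16
	for i := 1; i <= n && i <= 16; i++ {
		mask |= 1 << uint(16-i)
	}
	return mask
}

func main() {
	fmt.Printf("0x%04X\n", attributeMask(2))  // 0xC000 (EquipmentExtensionPackage)
	fmt.Printf("0x%04X\n", attributeMask(5))  // 0xF800 (EthernetFlowTerminationPoint)
	fmt.Printf("0x%04X\n", attributeMask(16)) // 0xFFFF (EthernetFrameExtendedPm)
}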
diff --git a/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm64-bit.go b/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm64-bit.go
new file mode 100644
index 0000000..38b2a0e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetframeextendedpm64-bit.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetFrameExtendedPm64BitClassId ClassID = ClassID(426)
+
+var ethernetframeextendedpm64bitBME *ManagedEntityDefinition
+
+// EthernetFrameExtendedPm64Bit (class ID #426)
+//	This ME collects some of the PM data at a point where an Ethernet flow can be observed. It is
+//	based on the Etherstats group of [IETF RFC 2819] and [IETF RFC 2863]. Instances of this ME are
+//	created and deleted by the OLT. References to received frames are to be interpreted as the
+//	number of frames entering the monitoring point in the direction specified by the control block.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. To facilitate
+//			discovery, the identification of instances sequentially starting with 1 is encouraged. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. If
+//			continuous accumulation is enabled in the control block, this attribute is not used and has the
+//			fixed value 0. (R) (mandatory) (1 byte)
+//
+//		Control Block
+//			(R, W, setbycreate) (mandatory) (16 bytes)
+//
+//		Drop Events
+//			Drop events:	The total number of events in which frames were dropped due to a lack of resources.
+//			This is not necessarily the number of frames dropped; it is the number of times this event was
+//			detected. (R) (mandatory) (4 bytes)
+//
+//		Octets
+//			Octets:	The total number of octets received, including those in bad frames, excluding framing
+//			bits, but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames
+//			Frames:	The total number of frames received, including bad frames, broadcast frames and
+//			multicast frames. (R) (mandatory) (4 bytes)
+//
+//		Broadcast Frames
+//			Broadcast frames: The total number of received good frames directed to the broadcast address.
+//			This does not include multicast frames. (R) (mandatory) (4 bytes)
+//
+//		Multicast Frames
+//			Multicast frames: The total number of received good frames directed to a multicast address. This
+//			does not include broadcast frames. (R) (mandatory) (4 bytes)
+//
+//		Crc Errored Frames
+//			CRC errored frames: The total number of frames received that had a length (excluding framing
+//			bits, but including FCS octets) of between 64 and 1518 octets, inclusive, but had either a bad
+//			FCS with an integral number of octets (FCS error) or a bad FCS with a non-integral number of
+//			octets (alignment error). (R) (mandatory) (4 bytes)
+//
+//		Undersize Frames
+//			Undersize frames: The total number of frames received that were less than 64 octets long but
+//			were otherwise well formed (excluding framing bits, but including FCS octets). (R) (mandatory)
+//			(4 bytes)
+//
+//		Oversize Frames
+//			Oversize frames: The total number of frames received that were longer than 1518 octets
+//			(excluding framing bits, but including FCS octets) and were otherwise well formed. (R)
+//			(mandatory) (4 bytes)
+//
+//		Frames 64 Octets
+//			Frames 64 octets: The total number of received frames (including bad frames) that were 64 octets
+//			long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 65 To 127 Octets
+//			Frames 65 to 127 octets: The total number of received frames (including bad frames) that were
+//			65..127 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 128 To 255 Octets
+//			Frames 128 to 255 octets: The total number of frames (including bad frames) received that were
+//			128..255 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 256 To 511 Octets
+//			Frames 256 to 511 octets: The total number of frames (including bad frames) received that were
+//			256..511 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 512 To 1 023 Octets
+//			Frames 512 to 1 023 octets: The total number of frames (including bad frames) received that were
+//			512..1 023 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Frames 1024 To 1518 Octets
+//			Frames 1024 to 1518 octets: The total number of frames (including bad frames) received that were
+//			1024..1518 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+type EthernetFrameExtendedPm64Bit struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetframeextendedpm64bitBME = &ManagedEntityDefinition{
+		Name:    "EthernetFrameExtendedPm64Bit",
+		ClassID: 426,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  MultiByteField("ControlBlock", 16, nil, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint64Field("DropEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint64Field("Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint64Field("Frames", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint64Field("BroadcastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint64Field("MulticastFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint64Field("CrcErroredFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint64Field("UndersizeFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint64Field("OversizeFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint64Field("Frames64Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint64Field("Frames65To127Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint64Field("Frames128To255Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint64Field("Frames256To511Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint64Field("Frames512To1023Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint64Field("Frames1024To1518Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetFrameExtendedPm64Bit (class ID 426) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetFrameExtendedPm64Bit(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetframeextendedpm64bitBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydatadownstream.go b/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydatadownstream.go
new file mode 100644
index 0000000..6c65ece
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydatadownstream.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetFramePerformanceMonitoringHistoryDataDownstreamClassId ClassID = ClassID(321)
+
+var ethernetframeperformancemonitoringhistorydatadownstreamBME *ManagedEntityDefinition
+
+// EthernetFramePerformanceMonitoringHistoryDataDownstream (class ID #321)
+//	This ME is identical to the Ethernet frame PM history data upstream ME, with the exception that
+//	it monitors downstream traffic.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of a MAC bridge port configuration
+//			data. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Drop Events
+//			Drop events:	The total number of events in which packets were dropped due to a lack of
+//			resources. This is not necessarily the number of packets dropped; it is the number of times this
+//			event was detected. (R) (mandatory) (4 bytes)
+//
+//		Octets
+//			Octets:	The total number of upstream octets received, including those in bad packets, excluding
+//			framing bits, but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets
+//			Packets:	The total number of upstream packets received, including bad packets, broadcast packets
+//			and multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Broadcast Packets
+//			Broadcast packets: The total number of upstream good packets received that were directed to the
+//			broadcast address. This does not include multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Multicast Packets
+//			Multicast packets: The total number of upstream good packets received that were directed to a
+//			multicast address. This does not include broadcast packets. (R) (mandatory) (4 bytes)
+//
+//		Crc Errored Packets
+//			CRC errored packets: The total number of upstream packets received that had a length (excluding
+//			framing bits, but including FCS octets) of between 64 octets and 1518 octets, inclusive, but had
+//			either a bad FCS with an integral number of octets (FCS error) or a bad FCS with a non-integral
+//			number of octets (alignment error). (R) (mandatory) (4 bytes)
+//
+//		Undersize Packets
+//			Undersize packets: The total number of upstream packets received that were less than 64 octets
+//			long, but were otherwise well formed (excluding framing bits, but including FCS). (R)
+//			(mandatory) (4 bytes)
+//
+//		Oversize Packets
+//			NOTE 2 – If 2 000 byte Ethernet frames are supported, counts in this performance parameter are
+//			not necessarily errors.
+//
+//		Packets 64 Octets
+//			Packets 64 octets: The total number of upstream received packets (including bad packets) that
+//			were 64 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 65 To 127 Octets
+//			Packets 65 to 127 octets: The total number of upstream received packets (including bad packets)
+//			that were 65..127 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 128 To 255 Octets
+//			Packets 128 to 255 octets: The total number of upstream packets (including bad packets) received
+//			that were 128..255 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 256 To 511 Octets
+//			Packets 256 to 511 octets: The total number of upstream packets (including bad packets) received
+//			that were 256..511 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 512 To 1023 Octets
+//			Packets 512 to 1023 octets: The total number of upstream packets (including bad packets)
+//			received that were 512..1 023 octets long, excluding framing bits but including FCS. (R)
+//			(mandatory) (4 bytes)
+//
+//		Packets 1024 To 1518 Octets
+//			Packets 1024 to 1518 octets: The total number of upstream packets (including bad packets)
+//			received that were 1024..1518 octets long, excluding framing bits, but including FCS. (R)
+//			(mandatory) (4 bytes)
+//
+type EthernetFramePerformanceMonitoringHistoryDataDownstream struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetframeperformancemonitoringhistorydatadownstreamBME = &ManagedEntityDefinition{
+		Name:    "EthernetFramePerformanceMonitoringHistoryDataDownstream",
+		ClassID: 321,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("DropEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("Packets", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("BroadcastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("MulticastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("CrcErroredPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("UndersizePackets", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("OversizePackets", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("Packets64Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("Packets65To127Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("Packets128To255Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("Packets256To511Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("Packets512To1023Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("Packets1024To1518Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetFramePerformanceMonitoringHistoryDataDownstream (class ID 321) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetFramePerformanceMonitoringHistoryDataDownstream(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetframeperformancemonitoringhistorydatadownstreamBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydataupstream.go b/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydataupstream.go
new file mode 100644
index 0000000..e808485
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetframeperformancemonitoringhistorydataupstream.go
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetFramePerformanceMonitoringHistoryDataUpstreamClassId ClassID = ClassID(322)
+
+var ethernetframeperformancemonitoringhistorydataupstreamBME *ManagedEntityDefinition
+
+// EthernetFramePerformanceMonitoringHistoryDataUpstream (class ID #322)
+//	This ME collects PM data associated with upstream Ethernet frame delivery. It is based on the
+//	Etherstats group of [IETF RFC 2819]. Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	NOTE 1 – Implementers are encouraged to consider the Ethernet frame extended PM ME defined in
+//	clause 9.3.32, which collects the same counters in a more generalized way.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of a MAC bridge port configuration
+//			data. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Drop Events
+//			Drop events:	The total number of events in which packets were dropped due to a lack of
+//			resources. This is not necessarily the number of packets dropped; it is the number of times this
+//			event was detected. (R) (mandatory) (4 bytes)
+//
+//		Octets
+//			Octets:	The total number of upstream octets received, including those in bad packets, excluding
+//			framing bits, but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets
+//			Packets:	The total number of upstream packets received, including bad packets, broadcast packets
+//			and multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Broadcast Packets
+//			Broadcast packets: The total number of upstream good packets received that were directed to the
+//			broadcast address. This does not include multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Multicast Packets
+//			Multicast packets: The total number of upstream good packets received that were directed to a
+//			multicast address. This does not include broadcast packets. (R) (mandatory) (4 bytes)
+//
+//		Crc Errored Packets
+//			CRC errored packets: The total number of upstream packets received that had a length (excluding
+//			framing bits, but including FCS octets) of between 64 octets and 1518 octets, inclusive, but had
+//			either a bad FCS with an integral number of octets (FCS error) or a bad FCS with a non-integral
+//			number of octets (alignment error). (R) (mandatory) (4 bytes)
+//
+//		Undersize Packets
+//			Undersize packets: The total number of upstream packets received that were less than 64 octets
+//			long, but were otherwise well formed (excluding framing bits, but including FCS). (R)
+//			(mandatory) (4 bytes)
+//
+//		Oversize Packets
+//			NOTE 2 – If 2 000 byte Ethernet frames are supported, counts in this performance parameter are
+//			not necessarily errors.
+//
+//		Packets 64 Octets
+//			Packets 64 octets: The total number of upstream received packets (including bad packets) that
+//			were 64 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 65 To 127 Octets
+//			Packets 65 to 127 octets: The total number of upstream received packets (including bad packets)
+//			that were 65..127 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 128 To 255 Octets
+//			Packets 128 to 255 octets: The total number of upstream packets (including bad packets) received
+//			that were 128..255 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 256 To 511 Octets
+//			Packets 256 to 511 octets: The total number of upstream packets (including bad packets) received
+//			that were 256..511 octets long, excluding framing bits but including FCS. (R) (mandatory)
+//			(4 bytes)
+//
+//		Packets 512 To 1023 Octets
+//			Packets 512 to 1023 octets: The total number of upstream packets (including bad packets)
+//			received that were 512..1 023 octets long, excluding framing bits but including FCS. (R)
+//			(mandatory) (4 bytes)
+//
+//		Packets 1024 To 1518 Octets
+//			Packets 1024 to 1518 octets: The total number of upstream packets (including bad packets)
+//			received that were 1024..1518 octets long, excluding framing bits, but including FCS. (R)
+//			(mandatory) (4 bytes)
+//
+type EthernetFramePerformanceMonitoringHistoryDataUpstream struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetframeperformancemonitoringhistorydataupstreamBME = &ManagedEntityDefinition{
+		Name:    "EthernetFramePerformanceMonitoringHistoryDataUpstream",
+		ClassID: 322,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("DropEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("Packets", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("BroadcastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("MulticastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("CrcErroredPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("UndersizePackets", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("OversizePackets", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("Packets64Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("Packets65To127Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("Packets128To255Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("Packets256To511Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("Packets512To1023Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("Packets1024To1518Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetFramePerformanceMonitoringHistoryDataUpstream (class ID 322) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetFramePerformanceMonitoringHistoryDataUpstream(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetframeperformancemonitoringhistorydataupstreamBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..6c0f001
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetPerformanceMonitoringHistoryDataClassId ClassID = ClassID(24)
+
+var ethernetperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// EthernetPerformanceMonitoringHistoryData (class ID #24)
+//	This ME collects some of the PM data for a physical Ethernet interface. Instances of this ME are
+//	created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PPTP Ethernet UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP Ethernet UNI. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Fcs Errors
+//			FCS errors:	This attribute counts frames received on a particular interface that were an
+//			integral number of octets in length but failed the FCS check. The count is incremented when the
+//			MAC service returns the frameCheckError status to the link layer control (LLC) or other MAC
+//			user. Received frames for which multiple error conditions are obtained are counted according to
+//			the error status presented to the LLC. (R) (mandatory) (4 bytes)
+//
+//		Excessive Collision Counter
+//			Excessive collision counter: This attribute counts frames whose transmission failed due to
+//			excessive collisions. (R) (mandatory) (4 bytes)
+//
+//		Late Collision Counter
+//			Late collision counter: This attribute counts the number of times that a collision was detected
+//			later than 512 bit times into the transmission of a packet. (R) (mandatory) (4 bytes)
+//
+//		Frames Too Long
+//			Frames too long: This attribute counts received frames that exceeded the maximum permitted frame
+//			size. The count is incremented when the MAC service returns the frameTooLong status to the LLC.
+//			(R) (mandatory) (4 bytes)
+//
+//		Buffer Overflows On Receive
+//			Buffer overflows on receive: This attribute counts the number of times that the receive buffer
+//			overflowed. (R) (mandatory) (4 bytes)
+//
+//		Buffer Overflows On Transmit
+//			Buffer overflows on transmit: This attribute counts the number of times that the transmit buffer
+//			overflowed. (R) (mandatory) (4 bytes)
+//
+//		Single Collision Frame Counter
+//			Single collision frame counter: This attribute counts successfully transmitted frames whose
+//			transmission was delayed by exactly one collision. (R) (mandatory) (4 bytes)
+//
+//		Multiple Collisions Frame Counter
+//			Multiple collisions frame counter: This attribute counts successfully transmitted frames whose
+//			transmission was delayed by more than one collision. (R) (mandatory) (4 bytes)
+//
+//		Sqe Counter
+//			SQE counter: This attribute counts the number of times that the SQE test error message was
+//			generated by the PLS sublayer. (R) (mandatory) (4 bytes)
+//
+//		Deferred Transmission Counter
+//			Deferred transmission counter: This attribute counts frames whose first transmission attempt was
+//			delayed because the medium was busy. The count does not include frames involved in collisions.
+//			(R) (mandatory) (4 bytes)
+//
+//		Internal Mac Transmit Error Counter
+//			Internal MAC transmit error counter: This attribute counts frames whose transmission failed due
+//			to an internal MAC sublayer transmit error. (R) (mandatory) (4 bytes)
+//
+//		Carrier Sense Error Counter
+//			Carrier sense error counter: This attribute counts the number of times that carrier sense was
+//			lost or never asserted when attempting to transmit a frame. (R) (mandatory) (4 bytes)
+//
+//		Alignment Error Counter
+//			Alignment error counter: This attribute counts received frames that were not an integral number
+//			of octets in length and did not pass the FCS check. (R) (mandatory) (4 bytes)
+//
+//		Internal Mac Receive Error Counter
+//			Internal MAC receive error counter: This attribute counts frames whose reception failed due to
+//			an internal MAC sublayer receive error. (R) (mandatory) (4 bytes)
+//
+type EthernetPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "EthernetPerformanceMonitoringHistoryData",
+		ClassID: 24,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("FcsErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("ExcessiveCollisionCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("LateCollisionCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("FramesTooLong", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("BufferOverflowsOnReceive", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("BufferOverflowsOnTransmit", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("SingleCollisionFrameCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("MultipleCollisionsFrameCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("SqeCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("DeferredTransmissionCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("InternalMacTransmitErrorCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("CarrierSenseErrorCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("AlignmentErrorCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("InternalMacReceiveErrorCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetPerformanceMonitoringHistoryData (class ID 24) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata2.go b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata2.go
new file mode 100644
index 0000000..060634f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata2.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetPerformanceMonitoringHistoryData2ClassId ClassID = ClassID(89)
+
+var ethernetperformancemonitoringhistorydata2BME *ManagedEntityDefinition
+
+// EthernetPerformanceMonitoringHistoryData2 (class ID #89)
+//	This ME collects additional PM data for a physical Ethernet interface. Instances of this ME are
+//	created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this Ethernet PM history data 2 ME is associated with an instance of the PPTP
+//		Ethernet UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP Ethernet UNI. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Pppoe Filtered Frame Counter
+//			PPPoE filtered frame counter: This attribute counts the number of frames discarded due to PPPoE
+//			filtering. (R) (mandatory) (4 bytes)
+//
+type EthernetPerformanceMonitoringHistoryData2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetperformancemonitoringhistorydata2BME = &ManagedEntityDefinition{
+		Name:    "EthernetPerformanceMonitoringHistoryData2",
+		ClassID: 89,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("PppoeFilteredFrameCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewEthernetPerformanceMonitoringHistoryData2 (class ID 89) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetPerformanceMonitoringHistoryData2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetperformancemonitoringhistorydata2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata3.go b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata3.go
new file mode 100644
index 0000000..557dc2c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetperformancemonitoringhistorydata3.go
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetPerformanceMonitoringHistoryData3ClassId ClassID = ClassID(296)
+
+var ethernetperformancemonitoringhistorydata3BME *ManagedEntityDefinition
+
+// EthernetPerformanceMonitoringHistoryData3 (class ID #296)
+//	This ME collects PM data associated with an Ethernet interface. It includes parameters defined
+//	in the Ethernet statistics group of [IETF RFC 2819] that are not already covered by previously
+//	defined Ethernet monitoring MEs. The received direction is from the CPE towards the network
+//	(upstream).
+//
+//	NOTE 1 – Several of the same counters are available from the Ethernet frame PM history data MEs,
+//	which are associated with MAC bridge ports. MAC bridge port association allows those MEs to be
+//	used for any Ethernet flow, in both upstream and downstream directions, while the Ethernet PM
+//	history data 3 ME can only be used on a physical IEEE 802.3 port.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	NOTE 2 – Implementers are encouraged to consider the Ethernet frame extended PM ME defined in
+//	clause 9.3.32, which collects the same counters in a more generalized way.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PPTP Ethernet UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP Ethernet UNI. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Drop Events
+//			Drop events:	The total number of events in which packets were dropped due to a lack of
+//			resources. This is not necessarily the number of packets dropped; it is the number of times this
+//			event was detected. (R) (mandatory) (4 bytes)
+//
+//		Octets
+//			Octets:	The total number of octets received from the CPE, including those in bad packets,
+//			excluding framing bytes, but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets
+//			Packets:	The total number of packets received, including bad packets, broadcast packets and
+//			multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Broadcast Packets
+//			Broadcast packets: The total number of received good packets directed to the broadcast address.
+//			This does not include multicast packets. (R) (mandatory) (4 bytes)
+//
+//		Multicast Packets
+//			Multicast packets: The total number of received good packets directed to a multicast address.
+//			This does not include broadcast packets. (R) (mandatory) (4 bytes)
+//
+//		Undersize Packets
+//			Undersize packets: The total number of packets received that were less than 64 octets long, but
+//			were otherwise well formed (excluding framing bits, but including FCS). (R) (mandatory)
+//			(4 bytes)
+//
+//		Fragments
+//			Fragments:	The total number of packets received that were less than 64 octets long, excluding
+//			framing bits but including FCS octets, and had either a bad FCS with an integral number of
+//			octets (FCS error) or a bad FCS with a non-integral number of octets (alignment error). It is
+//			entirely normal for this attribute to increment. This is because it counts both runts (which are
+//			normal occurrences due to collisions) and noise hits. (R) (mandatory) (4 bytes)
+//
+//		Jabbers
+//			Jabbers:	The total number of packets received that were longer than 1518 octets, excluding
+//			framing bits but including FCS octets, and had either a bad FCS with an integral number of
+//			octets (FCS error) or a bad FCS with a non-integral number of octets (alignment error). The
+//			range to detect jabber is between 20 ms and 150 ms. (R) (mandatory) (4 bytes)
+//
+//		Packets 64 Octets
+//			Packets 64 octets: The total number of received packets (including bad packets) that were
+//			64 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 65 To 127 Octets
+//			Packets 65 to 127 octets: The total number of received packets (including bad packets) that were
+//			65..127 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 128 To 255 Octets
+//			Packets 128 to 255 octets: The total number of packets (including bad packets) received that
+//			were 128..255 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 256 To 511 Octets
+//			Packets 256 to 511 octets: The total number of packets (including bad packets) received that
+//			were 256..511 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 512 To 1023 Octets
+//			Packets 512 to 1023 octets: The total number of packets (including bad packets) received that
+//			were 512..1023 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+//		Packets 1024 To 1518 Octets
+//			Packets 1024 to 1518 octets: The total number of packets (including bad packets) received that
+//			were 1024..1518 octets long, excluding framing bits but including FCS. (R) (mandatory) (4 bytes)
+//
+type EthernetPerformanceMonitoringHistoryData3 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetperformancemonitoringhistorydata3BME = &ManagedEntityDefinition{
+		Name:    "EthernetPerformanceMonitoringHistoryData3",
+		ClassID: 296,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("DropEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("Packets", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("BroadcastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("MulticastPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("UndersizePackets", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("Fragments", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("Jabbers", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("Packets64Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("Packets65To127Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("Packets128To255Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("Packets256To511Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("Packets512To1023Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("Packets1024To1518Octets", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewEthernetPerformanceMonitoringHistoryData3 (class ID 296) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetPerformanceMonitoringHistoryData3(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetperformancemonitoringhistorydata3BME, params...)
+}
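Editorial aside, not part of the vendored file: the Interval End Time attribute above is only described as identifying the most recently finished 15-min interval. The sketch below shows one way an implementation might derive that 1-byte value, assuming the common OMCI convention of a free-running counter of completed 15-min intervals truncated to 8 bits; the epoch used here is an arbitrary choice for illustration.

package main

import (
	"fmt"
	"time"
)

// intervalEndTime returns the number of completed 15-minute intervals since the
// given epoch, truncated to one byte so the counter wraps at 256 (assumption:
// this matches the rolling-counter behaviour implied by the PM descriptions).
func intervalEndTime(epoch, now time.Time) uint8 {
	completed := now.Sub(epoch) / (15 * time.Minute)
	return uint8(completed & 0xFF)
}

func main() {
	epoch := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)        // arbitrary reference for the sketch
	fmt.Println(intervalEndTime(epoch, epoch.Add(26*time.Hour))) // 104 completed intervals
}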
diff --git a/vendor/github.com/cboling/omci/generated/ethernetpseudowireparameters.go b/vendor/github.com/cboling/omci/generated/ethernetpseudowireparameters.go
new file mode 100644
index 0000000..161c235
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ethernetpseudowireparameters.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const EthernetPseudowireParametersClassId ClassID = ClassID(400)
+
+var ethernetpseudowireparametersBME *ManagedEntityDefinition
+
+// EthernetPseudowireParameters (class ID #400)
+//	This ME contains the Ethernet pseudowire parameters. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PW Ethernet configuration data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID:	This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PW Ethernet configuration data
+//			ME. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Mtu
+//			MTU:	This attribute identifies the maximum transmission unit (bytes) that can be received from
+//			the CPE in the upstream direction. Larger frames are discarded. (R, W, set-by-create)
+//			(mandatory) (2 bytes)
+//
+type EthernetPseudowireParameters struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ethernetpseudowireparametersBME = &ManagedEntityDefinition{
+		Name:    "EthernetPseudowireParameters",
+		ClassID: 400,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("Mtu", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewEthernetPseudowireParameters (class ID 400) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewEthernetPseudowireParameters(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ethernetpseudowireparametersBME, params...)
+}
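A pattern worth noting in these generated definitions, shown here as an illustration rather than library code: the AllowedAttributeMask assigns attribute 1 to the most significant bit, so an ME with a single attribute (such as the pseudowire MTU above) gets 0x8000, while one with all sixteen gets 0xFFFF.

package main

import "fmt"

// attributeMask builds a 16-bit mask with one bit per defined attribute,
// attribute 1 mapping to the most significant bit, reproducing the
// AllowedAttributeMask values seen in the generated definitions.
func attributeMask(n uint) uint16 {
	if n == 0 {
		return 0
	}
	if n > 16 {
		n = 16
	}
	return uint16(0xFFFF) << (16 - n)
}

func main() {
	// 0x8000, 0xffc0 and 0xffff: the masks used by EthernetPseudowireParameters,
	// FastChannelConfigurationProfile and EthernetPerformanceMonitoringHistoryData3.
	fmt.Printf("%#04x %#04x %#04x\n", attributeMask(1), attributeMask(10), attributeMask(16))
}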
diff --git a/vendor/github.com/cboling/omci/generated/extendedvlantaggingoperationconfigurationdata.go b/vendor/github.com/cboling/omci/generated/extendedvlantaggingoperationconfigurationdata.go
new file mode 100644
index 0000000..27846e6
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/extendedvlantaggingoperationconfigurationdata.go
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ExtendedVlanTaggingOperationConfigurationDataClassId ClassID = ClassID(171)
+
+var extendedvlantaggingoperationconfigurationdataBME *ManagedEntityDefinition
+
+// ExtendedVlanTaggingOperationConfigurationData (class ID #171)
+//	This ME organizes data associated with VLAN tagging. Regardless of its point of attachment, the
+//	specified tagging operations refer to the upstream direction. Instances of this ME are created
+//	and deleted by the OLT.
+//
+//	Relationships
+//		Zero or one instance of this ME may exist for an instance of any ME that can terminate or modify
+//		an Ethernet stream. When this ME is associated with a UNI-side TP, it performs its upstream classification and
+//		tagging operations before offering the upstream frame to other filtering, bridging or switching
+//		functions. In the downstream direction, the defined inverse operation is the last operation
+//		performed on the frame before offering it to the UNI-side termination. When this ME is associated with an ANI-side TP, it performs its upstream classification and
+//		tagging operations as the last step before transmission to the OLT, after having received the
+//		upstream frame from other filtering, bridging or switching functions. In the downstream
+//		direction, the defined inverse operation is the first operation performed on the frame before
+//		offering it to possible filter, bridge or switch functions.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute provides a unique number for each instance of this ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Association Type
+//			When the extended VLAN tagging ME is associated with the ANI side, it behaves as an upstream
+//			egress rule, and as a downstream ingress rule when the downstream mode attribute is equal to 0.
+//			When the extended VLAN tagging ME is associated with the UNI side, the extended VLAN tagging ME
+//			behaves as an upstream ingress rule, and as a downstream egress rule when the downstream mode
+//			attribute is equal to 0.
+//
+//		Received Frame Vlan Tagging Operation Table Max Size
+//			Received frame VLAN tagging operation table max size: This attribute indicates the maximum
+//			number of entries that can be set in the received frame VLAN tagging operation table. (R)
+//			(mandatory) (2 bytes)
+//
+//		Input Tpid
+//			Input TPID:	This attribute gives the special TPID value for operations on the input (filtering)
+//			side of the table. Typical values include 0x88A8 and 0x9100. (R, W) (mandatory) (2 bytes)
+//
+//		Output Tpid
+//			Output TPID: This attribute gives the special TPID value for operations on the output (tagging)
+//			side of the table. Typical values include 0x88A8 and 0x9100. (R, W) (mandatory) (2 bytes)
+//
+//		Downstream Mode
+//			All other values are reserved. (R, W) (mandatory) (1 byte)
+//
+//		Received Frame Vlan Tagging Operation Table
+//			111	Set TPID = output TPID, DEI = 1
+//
+//		Associated Me Pointer
+//			NOTE 5 – When the association type is xDSL, the two MSBs may be used to indicate a bearer
+//			channel.
+//
+//		Dscp To P Bit Mapping
+//			NOTE 6 – If certain bits in the DSCP field are to be ignored in the mapping process, the
+//			attribute should be provisioned such that all possible values of those bits produce the same
+//			P-bit mapping. This can be applied to the case where instead of full DSCP, the operator wishes
+//			to adopt the priority mechanism based on IP precedence, which needs only the three MSBs of the
+//			DSCP field.
+//
+type ExtendedVlanTaggingOperationConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	extendedvlantaggingoperationconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "ExtendedVlanTaggingOperationConfigurationData",
+		ClassID: 171,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("AssociationType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("ReceivedFrameVlanTaggingOperationTableMaxSize", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: Uint16Field("InputTpid", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: Uint16Field("OutputTpid", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: ByteField("DownstreamMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6: TableField("ReceivedFrameVlanTaggingOperationTable", TableInfo{nil, 16}, mapset.NewSetWith(Read, Write), false, false, false, 6),
+			7: Uint16Field("AssociatedMePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8: MultiByteField("DscpToPBitMapping", 24, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewExtendedVlanTaggingOperationConfigurationData (class ID 171) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewExtendedVlanTaggingOperationConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(extendedvlantaggingoperationconfigurationdataBME, params...)
+}
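NOTE 6 above describes how the DSCP-to-P-bit mapping attribute is provisioned, and the definition reserves 24 bytes for it. As a hedged illustration only: the 64 x 3-bit, MSB-first packing assumed below follows the usual G.988 description and is not stated in this file; the example mapping simply reuses the IP-precedence bits mentioned in the note.

package main

import "fmt"

// packDscpToPBits packs one 3-bit priority value per DSCP codepoint (0..63)
// into 24 bytes (64 x 3 bits = 192 bits), with DSCP 0 occupying the most
// significant bits of byte 0. The layout is an assumption for illustration.
func packDscpToPBits(pbits [64]uint8) [24]byte {
	var out [24]byte
	for dscp, p := range pbits {
		base := dscp * 3 // absolute bit offset of this codepoint's 3-bit field
		for i := 0; i < 3; i++ {
			if p&(1<<uint(2-i)) != 0 {
				out[(base+i)/8] |= 1 << uint(7-(base+i)%8)
			}
		}
	}
	return out
}

func main() {
	var m [64]uint8
	for dscp := range m {
		m[dscp] = uint8(dscp >> 3) // example: derive the P-bit from the IP precedence bits
	}
	packed := packDscpToPBits(m)
	fmt.Printf("% X\n", packed[:])
}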
diff --git a/vendor/github.com/cboling/omci/generated/fastchannelconfigurationprofile.go b/vendor/github.com/cboling/omci/generated/fastchannelconfigurationprofile.go
new file mode 100644
index 0000000..2f982c9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastchannelconfigurationprofile.go
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastChannelConfigurationProfileClassId ClassID = ClassID(432)
+
+var fastchannelconfigurationprofileBME *ManagedEntityDefinition
+
+// FastChannelConfigurationProfile (class ID #432)
+//	This ME contains the FAST channel configuration profile for an xDSL UNI. An instance of this ME
+//	is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Maximum Net Data Rate Maxndr
+//			Maximum net data rate (MAXNDR): This attribute specifies the value of the maximum net data rate.
+//			See clause 11.4.2.2 of [ITU-T G.9701]. Valid values range from 0 (0 kbit/s) to 4294967295
+//			(2^32–1 kbit/s). See clause 7.2.1.1 of [ITUT G.997.2]. (R, W) (mandatory) (4 bytes)
+//
+//		Minimum Expected Throughput Minetr
+//			Minimum expected throughput (MINETR): This attribute specifies the value of the minimum expected
+//			throughput. See clause 11.4.2.1 of [ITU-T G.9701]. Valid values range from 0 (0 kbit/s) to
+//			4294967295 (2^32–1 kbit/s). See clause 7.2.1.2 of [ITU-T G.997.2]. (R, W) (mandatory) (4 bytes)
+//
+//		Maximum Gamma Data Rate Maxgdr
+//			Maximum gamma data rate (MAXGDR): This attribute specifies the maximum value of the GDR (see
+//			clause 7.11.1.3). The GDR shall not exceed MAXGDR at the start of showtime and during showtime.
+//			Valid values range from 0 (0 kbit/s) to 4294967295 (2^32–1 kbit/s). See clause 7.2.1.3 of [ITU-T
+//			G.997.2]. (R, W) (mandatory) (4 bytes)
+//
+//		Minimum Gamma Data Rate Mingdr
+//			Minimum gamma data rate (MINGDR): This attribute specifies the minimum value of the GDR (see
+//			clause 7.11.1.3). The GDR may be lower than MINGDR. If the GDR is lower than MINGDR at
+//			initialization or when GDR becomes lower than MINGDR during showtime, a TCA occurs. Valid values
+//			range from 0 (0 kbit/s) to 4294967295 (2^32–1 kbit/s). See clause 7.2.1.4 of [ITU-T G.997.2].
+//			(R, W) (mandatory) (4 bytes)
+//
+//		Maximum Delay Delaymax
+//			Maximum delay (DELAYMAX): This attribute specifies the maximum allowed delay for retransmission.
+//			See clause 9.8 of [ITU-T G.9701]. The ITUT G.9701 control parameter delay_max is set to the same
+//			value as the maximum delay. See clause 11.4.2.3 of [ITU-T G.9701]. Valid values range from 4
+//			(1 ms) to 252 (63 ms) in steps of 0.25 ms. See clause 7.2.2.1 of [ITUT G.997.2]. (R, W)
+//			(mandatory) (4 bytes)
+//
+//		Minimum Impulse Noise Protection Against Shine Inpmin_Shine
+//			Minimum impulse noise protection against SHINE (INPMIN_SHINE): This attribute specifies the
+//			minimum INP against SHINE. See clause 9.8 of [ITU-T G.9701]. The ITU-T G.9701 control parameter
+//			INP_min_shine is set to the same value as the minimum INP against SHINE. See clause 11.4.2.4 of
+//			[ITU-T G.9701]. Valid values range from 0 to 520 (520 symbol periods). See clause 7.2.2.2 of
+//			[ITUT G.997.2]. (R, W) (mandatory) (2 bytes)
+//
+//		Shine Ratio Shineratio
+//			SHINE ratio (SHINERATIO): This attribute specifies the SHINE ratio that is used in the
+//			definition of the expected throughput rate (ETR). See clause 9.8 of [ITUT G.9701]. The ITU-T
+//			G.9701 control parameter SHINEratio is set to the same value as the SHINE ratio. See clause
+//			11.4.2.5 of [ITU-T G.9701]. The value is expressed in units of 0.001. Valid values range from 0
+//			to 100 (0.1) in steps of 0.001. See clause 7.2.2.3 of [ITU-T G.997.2]. (R, W) (mandatory)
+//			(1 byte)
+//
+//		Minimum Impulse Noise Protection Against Rein Inpmin_Rein
+//			Minimum impulse noise protection against REIN (INPMIN_REIN): This attribute specifies the
+//			minimum INP against REIN. See clause 9.8 of [ITU-T G.9701]. The ITU-T G.9701 control parameter
+//			INP_min_rein is set to the same value as the minimum INP against REIN. See clause 11.4.2.6 of
+//			[ITU-T G.9701]. Valid values range from 0 to 63 (63 symbol periods). See clause 7.2.2.4 of
+//			[ITU-T G.997.2]. (R, W) (mandatory) (1 byte)
+//
+//		Rein Inter_Arrival Time Iat_Rein
+//			(R, W) (mandatory) (1 byte)
+//
+//		Minimum Reed_Solomon Rfec_Nfec Ratio Rnratio
+//			Minimum Reed-Solomon RFEC/NFEC ratio (RNRATIO): This attribute specifies the minimal required
+//			ratio, RFEC/NFEC, of Reed-Solomon code parameters. The ITU-T G.9701 control parameter rnratio is
+//			set to the same value as the minimum Reed-Solomon RFEC/NFEC ratio. See clause 11.4.2.8 of
+//			[ITU-T G.9701]. The value is expressed in units of 1/32. Valid values range from 0 to 8 (1/4).
+//			See clause 7.2.2.6 of [ITU-T G.997.2]. (R, W) (mandatory) (1 byte)
+//
+//		Rtx_Tc Testmode Rtx_Testmode
+//			RTX-TC testmode (RTX_TESTMODE): This Boolean attribute specifies whether the retransmission test
+//			mode defined in clause 9.8.3.1.2 [ITU-T G.9701] is enabled (true) or disabled (false). See
+//			clause 7.2.2.7 of [ITU-T G.997.2]. (R, W) (optional) (1 byte)
+//
+type FastChannelConfigurationProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastchannelconfigurationprofileBME = &ManagedEntityDefinition{
+		Name:    "FastChannelConfigurationProfile",
+		ClassID: 432,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint32Field("MaximumNetDataRateMaxndr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 0),
+			1:  Uint32Field("MinimumExpectedThroughputMinetr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  Uint32Field("MaximumGammaDataRateMaxgdr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  Uint32Field("MinimumGammaDataRateMingdr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  Uint32Field("MaximumDelayDelaymax", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  Uint16Field("MinimumImpulseNoiseProtectionAgainstShineInpminShine", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  ByteField("ShineRatioShineratio", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("MinimumImpulseNoiseProtectionAgainstReinInpminRein", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("ReinInterArrivalTimeIatRein", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("MinimumReedSolomonRfecNfecRatioRnratio", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: ByteField("RtxTcTestmodeRtxTestmode", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+		},
+	}
+}
+
+// NewFastChannelConfigurationProfile (class ID 432) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastChannelConfigurationProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastchannelconfigurationprofileBME, params...)
+}
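Several of the attributes above carry scaled integers (for example DELAYMAX in steps of 0.25 ms and SHINERATIO in units of 0.001). The following self-contained sketch converts raw values to engineering units, with range checks taken from the descriptions above; it is illustrative only and not part of the library.

package main

import "fmt"

// delayMaxMilliseconds converts the raw DELAYMAX attribute value (valid range
// 4..252, steps of 0.25 ms per the description above) to milliseconds.
func delayMaxMilliseconds(raw uint32) (float64, error) {
	if raw < 4 || raw > 252 {
		return 0, fmt.Errorf("DELAYMAX raw value %d outside 4..252", raw)
	}
	return float64(raw) * 0.25, nil
}

// shineRatio converts the raw SHINERATIO attribute value (valid range 0..100,
// units of 0.001 per the description above) to its dimensionless value.
func shineRatio(raw uint8) (float64, error) {
	if raw > 100 {
		return 0, fmt.Errorf("SHINERATIO raw value %d outside 0..100", raw)
	}
	return float64(raw) * 0.001, nil
}

func main() {
	if ms, err := delayMaxMilliseconds(252); err == nil {
		fmt.Println("DELAYMAX:", ms, "ms") // 63 ms, matching the upper bound in the text
	}
	if r, err := shineRatio(100); err == nil {
		fmt.Println("SHINERATIO:", r) // 0.1
	}
}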
diff --git a/vendor/github.com/cboling/omci/generated/fastdatapathconfigurationprofile.go b/vendor/github.com/cboling/omci/generated/fastdatapathconfigurationprofile.go
new file mode 100644
index 0000000..5e27a21
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastdatapathconfigurationprofile.go
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastDataPathConfigurationProfileClassId ClassID = ClassID(433)
+
+var fastdatapathconfigurationprofileBME *ManagedEntityDefinition
+
+// FastDataPathConfigurationProfile (class ID #433)
+//	This ME contains the FAST data path configuration profile for an xDSL UNI. An instance of this
+//	ME is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Tps_Tc Testmode Tps_Testmode
+//			TPS-TC testmode (TPS_TESTMODE): This Boolean attribute specifies whether the TPS-TC test mode
+//			defined in clause 8.3.1 [ITU-T G.9701] is enabled (true) or disabled (false). See clause
+//			7.3.1 of [ITUT G.997.2]. (R, W) (mandatory) (1 byte)
+//
+type FastDataPathConfigurationProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastdatapathconfigurationprofileBME = &ManagedEntityDefinition{
+		Name:    "FastDataPathConfigurationProfile",
+		ClassID: 433,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("TpsTcTestmodeTpsTestmode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewFastDataPathConfigurationProfile (class ID 433) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastDataPathConfigurationProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastdatapathconfigurationprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/fastlineinventoryandstatusdatapart2.go b/vendor/github.com/cboling/omci/generated/fastlineinventoryandstatusdatapart2.go
new file mode 100644
index 0000000..18efd7b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastlineinventoryandstatusdatapart2.go
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastLineInventoryAndStatusDataPart2ClassId ClassID = ClassID(436)
+
+var fastlineinventoryandstatusdatapart2BME *ManagedEntityDefinition
+
+// FastLineInventoryAndStatusDataPart2 (class ID #436)
+//	This ME contains part 3 of the FAST line inventory and status data with attributes specific to
+//	[ITU-T G.997.2]. The ONU automatically creates or deletes an instance of this ME upon the
+//	creation or deletion of a PPTP xDSL UNI part 1.
+//
+//	Relationships
+//		This is one of the status data MEs associated with an xDSL UNI. It is required only if FAST is
+//		supported by the PPTP. The ONU automatically creates or deletes an instance of this ME upon
+//		creation or deletion of a PPTP xDSL UNI part 1 that supports these attributes.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Downstream Fra Operation Stamp_Frads
+//			(R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Upstream Fra Operation Stamp_Fraus
+//			Date/time-stamping of last successful upstream FRA operation (STAMP-FRAus): This parameter
+//			reports the date/time of the last successful FTU-O initiated FRA execution that has modified the
+//			bits allocation. See clause 7.10.14.6 of [ITUT G.997.2]. The format of this parameter is the
+//			same as STAMP-TEST-NE. (R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Downstream Rpa Operation Stamp_Rpads
+//			Date/time-stamping of last successful downstream RPA operation (STAMP-RPAds): This parameter
+//			reports the date/time of the last successful FTU-R initiated RPA execution that has modified the
+//			bits allocation for the RMC. See clause 7.10.14.7 of [ITU-T G.997.2]. The format of this
+//			parameter is the same as STAMP-TEST-NE. (R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Upstream Rpa Operation Stamp_Rpaus
+//			Date/time-stamping of last successful upstream RPA operation (STAMP-RPAus): This parameter
+//			reports the date/time of the last successful FTU-O initiated RPA execution that has modified the
+//			bits allocation for the RMC. See clause 7.10.14.8 of [ITU-T G.997.2]. The format of this
+//			parameter is the same as STAMP-TEST-NE. (R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Downstream Tiga Operation Stamp_Tiga
+//			Date/time-stamping of last successful downstream TIGA operation (STAMP-TIGA): This parameter
+//			reports the date/time of the last successful FTU-O initiated TIGA execution. See clause
+//			7.10.14.9 of [ITU-T G.997.2]. The format of this parameter is the same as STAMP-TEST-NE. (R)
+//			(optional) (7 bytes)
+//
+type FastLineInventoryAndStatusDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastlineinventoryandstatusdatapart2BME = &ManagedEntityDefinition{
+		Name:    "FastLineInventoryAndStatusDataPart2",
+		ClassID: 436,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("DateTimeStampingOfLastSuccessfulDownstreamFraOperationStampFrads", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 1),
+			2: MultiByteField("DateTimeStampingOfLastSuccessfulUpstreamFraOperationStampFraus", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 2),
+			3: MultiByteField("DateTimeStampingOfLastSuccessfulDownstreamRpaOperationStampRpads", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4: MultiByteField("DateTimeStampingOfLastSuccessfulUpstreamRpaOperationStampRpaus", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: MultiByteField("DateTimeStampingOfLastSuccessfulDownstreamTigaOperationStampTiga", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewFastLineInventoryAndStatusDataPart2 (class ID 436) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastLineInventoryAndStatusDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastlineinventoryandstatusdatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/fastvectoringlineconfigurationextensions.go b/vendor/github.com/cboling/omci/generated/fastvectoringlineconfigurationextensions.go
new file mode 100644
index 0000000..d1eda20
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastvectoringlineconfigurationextensions.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastVectoringLineConfigurationExtensionsClassId ClassID = ClassID(434)
+
+var fastvectoringlineconfigurationextensionsBME *ManagedEntityDefinition
+
+// FastVectoringLineConfigurationExtensions (class ID #434)
+//	This ME extends FAST line configuration MEs with attributes that are specific to vectoring. An
+//	instance of this ME is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of an xDSL UNI. The overall FAST line configuration MEs are modelled in several parts, all of which are
+//		associated together through a common ME ID (the client PPTP xDSL UNI part 3 has a single
+//		pointer, which refers to the entire set of line configuration parts).
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Fext Cancellation Enabling_Disabling Upstream Fext_To_Cancel_Enableus
+//			FEXT cancellation enabling/disabling upstream (FEXT_TO_CANCEL_ENABLEus): A value of 1 enables
+//			and a value of 0 disables FEXT cancellation in the upstream direction from all the other
+//			vectored lines into the line in the vectored group. See clause 7.1.7.2 of [ITU-T G.997.2].
+//			(R, W) (mandatory) (1 byte)
+//
+//		Fext Cancellation Enabling_Disabling Downstream Fext_To_Cancel_Enableds
+//			FEXT cancellation enabling/disabling downstream (FEXT_TO_CANCEL_ENABLEds): A value of 1 enables
+//			and a value of 0 disables FEXT cancellation in the downstream direction from all the other
+//			vectored lines into the line in the vectored group. See clause 7.1.7.1 of [ITUT G.997.2]. (R, W)
+//			(mandatory) (1 byte)
+//
+type FastVectoringLineConfigurationExtensions struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastvectoringlineconfigurationextensionsBME = &ManagedEntityDefinition{
+		Name:    "FastVectoringLineConfigurationExtensions",
+		ClassID: 434,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("FextCancellationEnablingDisablingUpstreamFextToCancelEnableus", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: ByteField("FextCancellationEnablingDisablingDownstreamFextToCancelEnableds", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+		},
+	}
+}
+
+// NewFastVectoringLineConfigurationExtensions (class ID 434) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastVectoringLineConfigurationExtensions(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastvectoringlineconfigurationextensionsBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/fastxtu-cperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/fastxtu-cperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..9b20a8c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastxtu-cperformancemonitoringhistorydata.go
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastXtuCPerformanceMonitoringHistoryDataClassId ClassID = ClassID(437)
+
+var fastxtucperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// FastXtuCPerformanceMonitoringHistoryData (class ID #437)
+//	This ME collects PM data on the xTU-C to xTU-R path as seen from the xTU-C. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R, set-
+//			by-create) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Successful Fra Counter
+//			Successful FRA counter: This attribute counts the successful FRA primitives (success_FRA). The
+//			successful FRA primitive (success_FRA) is defined in clause 11.3.1.6 of [ITU-T G.9701]. See
+//			clause 7.7.22 of [ITU-T G.997.2]. (R) (mandatory) (4 bytes)
+//
+//		Successful Rpa Counter
+//			Successful RPA counter: This attribute counts the successful RPA primitives (success_RPA). The
+//			successful RPA primitive (success_RPA) is defined in clause 11.3.1.6 of [ITU-T G.9701]. See
+//			clause 7.7.23 of [ITU-T G.997.2] (R) (optional) (4 bytes)
+//
+//		Successful Tiga Counter
+//			Successful TIGA counter: This attribute counts the successful TIGA primitives (success_TIGA).
+//			The successful TIGA primitive (success_TIGA) is defined in clause 11.3.1.6 of [ITU-T G.9701].
+//			Reported only with the near-end measured time, invalid data flag and timestamp. See clause
+//			7.7.24 of [ITUT G.997.2] (R) (optional) (4 bytes)
+//
+type FastXtuCPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastxtucperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "FastXtuCPerformanceMonitoringHistoryData",
+		ClassID: 437,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("SuccessfulFraCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("SuccessfulRpaCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: Uint32Field("SuccessfulTigaCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewFastXtuCPerformanceMonitoringHistoryData (class ID 437) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastXtuCPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastxtucperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/fastxtu-rperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/fastxtu-rperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..dea13c9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/fastxtu-rperformancemonitoringhistorydata.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const FastXtuRPerformanceMonitoringHistoryDataClassId ClassID = ClassID(438)
+
+var fastxturperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// FastXtuRPerformanceMonitoringHistoryData (class ID #438)
+//	This ME collects PM data of the xTU-C to xTU-R path as seen from the xTU-R. Instances of this ME
+//	are created and deleted by the OLT. For a complete discussion of generic PM architecture, refer
+//	to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R, set-
+//			by-create) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Successful Fra Counter
+//			Successful FRA counter: This attribute counts the successful FRA primitives (success_FRA). The
+//			successful FRA primitive (success_FRA) is defined in clause 11.3.1.6 of [ITU-T G.9701]. See
+//			clause 7.7.22 of [ITU-T G.997.2] (R) (mandatory) (4 bytes)
+//
+//		Successful Rpa Counter
+//			Successful RPA counter: This attribute counts the successful RPA primitives (success_RPA). The
+//			successful RPA primitive (success_RPA) is defined in clause 11.3.1.6 of [ITU-T G.9701]. See
+//			clause 7.7.23 of [ITU-T G.997.2] (R) (optional) (4 bytes)
+//
+type FastXtuRPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	fastxturperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "FastXtuRPerformanceMonitoringHistoryData",
+		ClassID: 438,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("SuccessfulFraCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("SuccessfulRpaCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+		},
+	}
+}
+
+// NewFastXtuRPerformanceMonitoringHistoryData (class ID 438) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewFastXtuRPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(fastxturperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/galethernetperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/galethernetperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..3ae384d
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/galethernetperformancemonitoringhistorydata.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const GalEthernetPerformanceMonitoringHistoryDataClassId ClassID = ClassID(276)
+
+var galethernetperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// GalEthernetPerformanceMonitoringHistoryData (class ID #276)
+//	This ME collects PM data associated with a GEM IW TP when the GEM layer supports an Ethernet
+//	service. Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the GEM IW TP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the GEM IW TP. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Discarded Frames
+//			Discarded frames: This attribute counts the number of downstream GEM frames discarded for any
+//			reason [erroneous frame check sequence (FCS), too long length, buffer overflow, etc.]. (R)
+//			(mandatory) (4 bytes)
+//
+type GalEthernetPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	galethernetperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "GalEthernetPerformanceMonitoringHistoryData",
+		ClassID: 276,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("DiscardedFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewGalEthernetPerformanceMonitoringHistoryData (class ID 276) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewGalEthernetPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(galethernetperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/galethernetprofile.go b/vendor/github.com/cboling/omci/generated/galethernetprofile.go
new file mode 100644
index 0000000..a83aea8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/galethernetprofile.go
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const GalEthernetProfileClassId ClassID = ClassID(272)
+
+var galethernetprofileBME *ManagedEntityDefinition
+
+// GalEthernetProfile (class ID #272)
+//	This ME organizes data that describe the gigabit-capable passive optical network transmission
+//	convergence layer (GTC) adaptation layer processing functions of the ONU for Ethernet services.
+//	It is used with the GEM IW TP ME.
+//
+//	Instances of this ME are created and deleted on request of the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the GEM IW TP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Maximum Gem Payload Size
+//			Maximum GEM payload size: This attribute defines the maximum payload size generated in the
+//			associated GEM IW TP ME. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+type GalEthernetProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	galethernetprofileBME = &ManagedEntityDefinition{
+		Name:    "GalEthernetProfile",
+		ClassID: 272,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("MaximumGemPayloadSize", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewGalEthernetProfile (class ID 272) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from, or is about to be sent on, the wire.
+func NewGalEthernetProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(galethernetprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/geminterworkingterminationpoint.go b/vendor/github.com/cboling/omci/generated/geminterworkingterminationpoint.go
new file mode 100644
index 0000000..aa6693f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/geminterworkingterminationpoint.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const GemInterworkingTerminationPointClassId ClassID = ClassID(266)
+
+var geminterworkingterminationpointBME *ManagedEntityDefinition
+
+// GemInterworkingTerminationPoint (class ID #266)
+//	An instance of this ME represents a point in the ONU where the IW of a bearer service (usually
+//	Ethernet) to the GEM layer takes place. At this point, GEM packets are generated from the bearer
+//	bit stream (e.g., Ethernet) or the bearer bit stream is reconstructed from GEM packets.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		One instance of this ME exists for each transformation of a data stream into GEM frames and vice
+//		versa.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Gem Port Network Ctp Connectivity Pointer
+//			GEM port network CTP connectivity pointer: This attribute points to an instance of the GEM port
+//			network CTP. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interworking Option
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Service Profile Pointer
+//			NOTE – The video return path (VRP) service profile is defined in [ITU-T G.984.4].
+//
+//		Interworking Termination Point Pointer
+//			In all other GEM services, the relationship between the related service TP and this GEM IW TP is
+//			derived from other ME relations; this attribute is set to a null pointer and not used. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Pptp Counter
+//			PPTP counter: This value reports the number of PPTP ME instances associated with this GEM IW TP.
+//			(R) (optional) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Gal Profile Pointer
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Gal Loopback Configuration
+//			The default value of this attribute is 0. When the IW option is 6 (downstream broadcast), this
+//			attribute is not used. (R, W) (mandatory) (1 byte)
+//
+type GemInterworkingTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	geminterworkingterminationpointBME = &ManagedEntityDefinition{
+		Name:    "GemInterworkingTerminationPoint",
+		ClassID: 266,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("GemPortNetworkCtpConnectivityPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("InterworkingOption", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("ServiceProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("InterworkingTerminationPointPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: ByteField("PptpCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 6),
+			7: Uint16Field("GalProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8: ByteField("GalLoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewGemInterworkingTerminationPoint (class ID 266) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewGemInterworkingTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(geminterworkingterminationpointBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/gemportnetworkctp.go b/vendor/github.com/cboling/omci/generated/gemportnetworkctp.go
new file mode 100644
index 0000000..84586f9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/gemportnetworkctp.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const GemPortNetworkCtpClassId ClassID = ClassID(268)
+
+var gemportnetworkctpBME *ManagedEntityDefinition
+
+// GemPortNetworkCtp (class ID #268)
+//	This ME represents the termination of a GEM port on an ONU. This ME aggregates connectivity
+//	functionality from the network view and alarms from the network element view as well as
+//	artefacts from trails.
+//
+//	Instances of the GEM port network CTP ME are created and deleted by the OLT. An instance of GEM
+//	port network CTP can be deleted only when no GEM IW TP or GEM port network CTP PM history data
+//	are associated with it. It is the responsibility of the OLT to make sure that the ONU
+//	configuration meets this condition.
+//
+//	In ITU-T G.984 systems, when a GEM port network CTP is created, its encryption state is by
+//	default not encrypted. If the OLT wishes to configure the GEM port to use encryption, it must
+//	send the appropriate PLOAM message. This applies equally to new CTPs and to CTPs that are re-
+//	created after an MIB reset.
+//
+//	In ITU-T G.987 systems, GEM ports are dynamically encrypted. If it is intended to encrypt the
+//	GEM port, the OLT must configure a key ring to be used, and the key must be known to the ONU at
+//	run time.
+//
+//	Relationships
+//		An instance of the GEM port network CTP ME may be associated with an instance of the T-CONT and
+//		GEM IW TP MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Port_Id
+//			NOTE 1 – While nothing forbids the existence of several GEM port network CTPs with the same
+//			port-ID value, downstream traffic is modelled as being delivered to all such GEM port network
+//			CTPs. Be aware of potential difficulties associated with defining downstream flows and
+//			aggregating PM statistics.
+//
+//		T_Cont Pointer
+//			T-CONT pointer: This attribute points to a T-CONT instance. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Direction
+//			Direction:	This attribute specifies whether the GEM port is used for UNI-to-ANI (1), ANI-to-UNI
+//			(2), or bidirectional (3) connection. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Traffic Management Pointer For Upstream
+//			Traffic management pointer for upstream: If the traffic management option attribute in the ONU-G
+//			ME is 0 (priority controlled) or 2 (priority and rate controlled), this pointer specifies the
+//			priority queue ME serving this GEM port network CTP. If the traffic management option attribute
+//			is 1 (rate controlled), this attribute redundantly points to the TCONT serving this GEM port
+//			network CTP. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Traffic Descriptor Profile Pointer For Upstream
+//			See also Appendix II.
+//
+//		Uni Counter
+//			UNI counter: This attribute reports the number of instances of UNI-G ME associated with this GEM
+//			port network CTP. (R) (optional) (1 byte)
+//
+//		Priority Queue Pointer For Down Stream
+//			NOTE 2 – If the GEM port network CTP is associated with more than one UNI (downstream
+//			multicast), the downstream priority queue pointer defines a pattern (e.g., queue number 3 for a
+//			given UNI) to be replicated (i.e., to queue number 3) at the other affected UNIs.
+//
+//		Encryption State
+//			Encryption state: This attribute indicates the current state of the GEM port network CTP's
+//			encryption. Legal values are defined to be the same as those of the security mode attribute of
+//			the ONU2-G, with the exception that attribute value 0 indicates an unencrypted GEM port. (R)
+//			(optional) (1 byte)
+//
+//		Traffic Descriptor Profile Pointer For Downstream
+//			See also Appendix II.
+//
+//		Encryption Key Ring
+//			Other values are reserved.
+//
+type GemPortNetworkCtp struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	gemportnetworkctpBME = &ManagedEntityDefinition{
+		Name:    "GemPortNetworkCtp",
+		ClassID: 268,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("PortId", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("TContPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("Direction", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("TrafficManagementPointerForUpstream", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("TrafficDescriptorProfilePointerForUpstream", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6:  ByteField("UniCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7:  Uint16Field("PriorityQueuePointerForDownStream", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("EncryptionState", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint16Field("TrafficDescriptorProfilePointerForDownstream", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 9),
+			10: ByteField("EncryptionKeyRing", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 10),
+		},
+	}
+}
+
+// NewGemPortNetworkCtp (class ID 268) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewGemPortNetworkCtp(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(gemportnetworkctpBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/gemportnetworkctpperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/gemportnetworkctpperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..233afec
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/gemportnetworkctpperformancemonitoringhistorydata.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const GemPortNetworkCtpPerformanceMonitoringHistoryDataClassId ClassID = ClassID(341)
+
+var gemportnetworkctpperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// GemPortNetworkCtpPerformanceMonitoringHistoryData (class ID #341)
+//	This ME collects GEM frame PM data associated with a GEM port network CTP. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	NOTE 1 – One might expect to find some form of impaired or discarded frame count associated with
+//	a GEM port. However, the only impairment that might be detected at the GEM frame level would be
+//	a corrupted GEM frame header. In this case, no part of the header could be considered reliable
+//	including the port ID. For this reason, there is no impaired or discarded frame count in this
+//	ME.
+//
+//	NOTE 2 – This ME replaces the GEM port performance history data ME and is preferred for new
+//	implementations.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the GEM port network CTP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the GEM port network CTP. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Transmitted Gem Frames
+//			Transmitted GEM frames: This attribute counts GEM frames transmitted on the monitored GEM port.
+//			(R) (mandatory) (4 bytes)
+//
+//		Received Gem Frames
+//			Received GEM frames: This attribute counts GEM frames received correctly on the monitored GEM
+//			port. A correctly received GEM frame is one that does not contain uncorrectable errors and has a
+//			valid header error check (HEC). (R) (mandatory) (4 bytes)
+//
+//		Received Payload Bytes
+//			Received payload bytes: This attribute counts user payload bytes received on the monitored GEM
+//			port. (R) (mandatory) (8 bytes)
+//
+//		Transmitted Payload Bytes
+//			Transmitted payload bytes: This attribute counts user payload bytes transmitted on the monitored
+//			GEM port. (R) (mandatory) (8 bytes)
+//
+//		Encryption Key Errors
+//			NOTE 4 – GEM PM counts each non-idle GEM frame, whether it contains an entire user frame or only
+//			a fragment of a user frame.
+//
+type GemPortNetworkCtpPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	gemportnetworkctpperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "GemPortNetworkCtpPerformanceMonitoringHistoryData",
+		ClassID: 341,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("TransmittedGemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("ReceivedGemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint64Field("ReceivedPayloadBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint64Field("TransmittedPayloadBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("EncryptionKeyErrors", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+		},
+	}
+}
+
+// NewGemPortNetworkCtpPerformanceMonitoringHistoryData (class ID 341) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewGemPortNetworkCtpPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(gemportnetworkctpperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/ieee802.1pmapperserviceprofile.go b/vendor/github.com/cboling/omci/generated/ieee802.1pmapperserviceprofile.go
new file mode 100644
index 0000000..1906172
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/ieee802.1pmapperserviceprofile.go
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Ieee8021PMapperServiceProfileClassId ClassID = ClassID(130)
+
+var ieee8021pmapperserviceprofileBME *ManagedEntityDefinition
+
+// Ieee8021PMapperServiceProfile (class ID #130)
+//	This ME associates the priorities of IEEE 802.1p [IEEE 802.1D] priority tagged frames with
+//	specific connections. This ME directs upstream traffic to the designated GEM ports. Downstream
+//	traffic arriving on any of the IEEE 802.1p mapper's GEM ports is directed to the mapper's root
+//	TP. Other mechanisms exist to direct downstream traffic, specifically a direct pointer to a
+//	downstream queue from the GEM port network CTP. If such an alternative is used, it should be
+//	provisioned to be consistent with the flow model of the mapper.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		At its root, an instance of this ME may be associated with zero or one instance of a PPTP UNI,
+//		MAC bridge port configuration data, or any type of IW TP ME that carries IEEE 802 traffic. Each
+//		of its eight branches is associated with zero or one GEM IW TP.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Tp Pointer
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 0
+//			Interwork TP pointer for P-bit priority 0:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 1
+//			Interwork TP pointer for P-bit priority 1:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 2
+//			Interwork TP pointer for P-bit priority 2:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 3
+//			Interwork TP pointer for P-bit priority 3:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 4
+//			Interwork TP pointer for P-bit priority 4:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 5
+//			Interwork TP pointer for P-bit priority 5:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 6
+//			Interwork TP pointer for P-bit priority 6:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Interwork Tp Pointer For P_Bit Priority 7
+//			Interwork TP pointer for P-bit priority 7:	(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Unmarked Frame Option
+//			Untagged downstream frames are passed through the mapper transparently.
+//
+//		Dscp To P Bit Mapping
+//			NOTE – If certain bits in the DSCP field are to be ignored in the mapping process, the attribute
+//			should be provisioned such that all possible values of those bits produce the same P-bit
+//			mapping. This can be applied to the case where instead of full DSCP, the operator wishes to
+//			adopt the priority mechanism based on IP precedence, which needs only the three MSBs of the DSCP
+//			field.
+//
+//		Default P Bit Assumption
+//			Default P-bit assumption: This attribute is valid when the unmarked frame option attribute is
+//			set to 1. In its LSBs, the default P-bit assumption attribute contains the default PCP field to
+//			be assumed. The unmodified frame is then directed to the GEM IW TP indicated by the interwork TP
+//			pointer mappings. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tp Type
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+type Ieee8021PMapperServiceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	ieee8021pmapperserviceprofileBME = &ManagedEntityDefinition{
+		Name:    "Ieee8021PMapperServiceProfile",
+		ClassID: 130,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("TpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("InterworkTpPointerForPBitPriority0", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("InterworkTpPointerForPBitPriority1", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("InterworkTpPointerForPBitPriority2", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("InterworkTpPointerForPBitPriority3", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint16Field("InterworkTpPointerForPBitPriority4", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  Uint16Field("InterworkTpPointerForPBitPriority5", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  Uint16Field("InterworkTpPointerForPBitPriority6", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  Uint16Field("InterworkTpPointerForPBitPriority7", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: ByteField("UnmarkedFrameOption", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: MultiByteField("DscpToPBitMapping", 24, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: ByteField("DefaultPBitAssumption", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 12),
+			13: ByteField("TpType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 13),
+		},
+	}
+}
+
+// NewIeee8021PMapperServiceProfile (class ID 130) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewIeee8021PMapperServiceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(ieee8021pmapperserviceprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/interworkingvccterminationpoint.go b/vendor/github.com/cboling/omci/generated/interworkingvccterminationpoint.go
new file mode 100644
index 0000000..c856c28
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/interworkingvccterminationpoint.go
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const InterworkingVccTerminationPointClassId ClassID = ClassID(14)
+
+var interworkingvccterminationpointBME *ManagedEntityDefinition
+
+// InterworkingVccTerminationPoint (class ID #14)
+//	An instance of this ME represents a point in the ONU where the IW of a service or underlying
+//	physical infrastructure (e.g., ADSL) to an ATM layer takes place. At this point, ATM cells are
+//	generated from a bit stream (e.g., Ethernet) or a bit stream is reconstructed from ATM cells.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		One instance of this ME exists for each occurrence of transformation of a data stream into ATM
+//		cells and vice versa.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Vci Value
+//			VCI value:	This attribute identifies the VCI value associated with this IW VCC TP. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Vp Network Ctp Connectivity Pointer
+//			VP network CTP connectivity pointer: This attribute points to the VP network CTP associated with
+//			this IW VCC TP. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Deprecated 1
+//			Deprecated 1: Not used; should be set to 0. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Deprecated 2
+//			Deprecated 2: Not used; should be set to 0. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Aal5 Profile Pointer
+//			AAL5 profile pointer: This attribute points to an instance of the AAL5 profile. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Deprecated 3
+//			Deprecated 3: Not used; should be set to 0. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Aal Loopback Configuration
+//			The default value of this attribute is 0. (R, W) (mandatory) (1 byte)
+//
+//		Pptp Counter
+//			PPTP counter: This value is the number of instances of PPTP MEs associated with this instance of
+//			the IW VCC TP. (R) (optional) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+type InterworkingVccTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	interworkingvccterminationpointBME = &ManagedEntityDefinition{
+		Name:    "InterworkingVccTerminationPoint",
+		ClassID: 14,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("VciValue", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("VpNetworkCtpConnectivityPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("Deprecated1", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, true, 3),
+			4: Uint16Field("Deprecated2", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, true, 4),
+			5: Uint16Field("Aal5ProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: Uint16Field("Deprecated3", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, true, 6),
+			7: ByteField("AalLoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8: ByteField("PptpCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 9),
+		},
+	}
+}
+
+// NewInterworkingVccTerminationPoint (class ID 14) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewInterworkingVccTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(interworkingvccterminationpointBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/iphostconfigdata.go b/vendor/github.com/cboling/omci/generated/iphostconfigdata.go
new file mode 100644
index 0000000..f41f3a9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/iphostconfigdata.go
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const IpHostConfigDataClassId ClassID = ClassID(134)
+
+var iphostconfigdataBME *ManagedEntityDefinition
+
+// IpHostConfigData (class ID #134)
+//	The IP host config data configures IPv4 based services offered on the ONU. The ONU automatically
+//	creates instances of this ME if IP host services are available. A possible IPv6 stack is
+//	supported through the IPv6 host config data ME. In this clause, references to IP addresses are
+//	understood to mean IPv4.
+//
+//	Relationships
+//		An instance of this ME is associated with the ONU ME. Any number of TCP/UDP config data MEs can
+//		point to the IP host config data, to model any number of ports and protocols. Performance may be
+//		monitored through an implicitly linked IP host PM history data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The ONU creates
+//			as many instances as there are independent IPv4 stacks on the ONU. To facilitate discovery, IP
+//			host config data MEs should be numbered from 0 upwards. The ONU should create IP(v4) and IPv6
+//			host config data MEs with separate ME IDs, such that other MEs can use a single TP type
+//			attribute to link with either. (R) (mandatory) (2 bytes)
+//
+//		Ip Options
+//			(R, W) (mandatory) (1 byte)
+//
+//		Mac Address
+//			MAC address: This attribute indicates the MAC address used by the IP node. (R) (mandatory)
+//			(6 bytes)
+//
+//		Onu Identifier
+//			Onu identifier: A unique ONU identifier string. If set to a non-null value, this string is used
+//			instead of the MAC address in retrieving dynamic host configuration protocol (DHCP) parameters.
+//			If the string is shorter than 25 characters, it must be null terminated. Its default value is 25
+//			null bytes. (R, W) (mandatory) (25 bytes)
+//
+//		Ip Address
+//			IP address:	The address used for IP host services; this attribute has the default value 0.
+//			(R, W) (mandatory) (4 bytes)
+//
+//		Mask
+//			Mask:	The subnet mask for IP host services; this attribute has the default value 0. (R, W)
+//			(mandatory) (4 bytes)
+//
+//		Gateway
+//			Gateway:	The default gateway address used for IP host services; this attribute has the default
+//			value 0. (R, W) (mandatory) (4 bytes)
+//
+//		Primary Dns
+//			Primary DNS: The address of the primary DNS server; this attribute has the default value 0.
+//			(R, W) (mandatory) (4 bytes)
+//
+//		Secondary Dns
+//			Secondary DNS: The address of the secondary DNS server; this attribute has the default value 0.
+//			(R, W) (mandatory) (4 bytes)
+//
+//		Current Address
+//			Current address: Current address of the IP host service. (R) (optional) (4 bytes)
+//
+//		Current Mask
+//			Current mask: Current subnet mask for the IP host service. (R) (optional) (4 bytes)
+//
+//		Current Gateway
+//			Current gateway: Current default gateway address for the IP host service. (R) (optional)
+//			(4 bytes)
+//
+//		Current Primary Dns
+//			Current primary DNS: Current primary DNS server address. (R) (optional) (4 bytes)
+//
+//		Current Secondary Dns
+//			Current secondary DNS: Current secondary DNS server address. (R) (optional) (4 bytes)
+//
+//		Domain Name
+//			Domain name: If DHCP indicates a domain name, it is presented here. If no domain name is
+//			indicated, this attribute is set to a null string. If the string is shorter than 25 bytes, it
+//			must be null terminated. The default value is 25 null bytes. (R) (mandatory) (25 bytes)
+//
+//		Host Name
+//			Host name:	If DHCP indicates a host name, it is presented here. If no host name is indicated,
+//			this attribute is set to a null string. If the string is shorter than 25 bytes, it must be null
+//			terminated. The default value is 25 null bytes. (R) (mandatory) (25 bytes)
+//
+//		Relay Agent Options
+//			2/3/4:atm/123.4567
+//
+type IpHostConfigData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	iphostconfigdataBME = &ManagedEntityDefinition{
+		Name:    "IpHostConfigData",
+		ClassID: 134,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("IpOptions", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  MultiByteField("MacAddress", 6, nil, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  MultiByteField("OnuIdentifier", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  Uint32Field("IpAddress", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  Uint32Field("Mask", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint32Field("Gateway", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint32Field("PrimaryDns", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  Uint32Field("SecondaryDns", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  Uint32Field("CurrentAddress", 0, mapset.NewSetWith(Read), true, false, true, false, 9),
+			10: Uint32Field("CurrentMask", 0, mapset.NewSetWith(Read), true, false, true, false, 10),
+			11: Uint32Field("CurrentGateway", 0, mapset.NewSetWith(Read), true, false, true, false, 11),
+			12: Uint32Field("CurrentPrimaryDns", 0, mapset.NewSetWith(Read), true, false, true, false, 12),
+			13: Uint32Field("CurrentSecondaryDns", 0, mapset.NewSetWith(Read), true, false, true, false, 13),
+			14: MultiByteField("DomainName", 25, nil, mapset.NewSetWith(Read), true, false, false, false, 14),
+			15: MultiByteField("HostName", 25, nil, mapset.NewSetWith(Read), true, false, false, false, 15),
+			16: Uint16Field("RelayAgentOptions", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 16),
+		},
+	}
+}
+
+// NewIpHostConfigData (class ID 134) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewIpHostConfigData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(iphostconfigdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/iphostperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/iphostperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..e3e05f6
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/iphostperformancemonitoringhistorydata.go
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const IpHostPerformanceMonitoringHistoryDataClassId ClassID = ClassID(135)
+
+var iphostperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// IpHostPerformanceMonitoringHistoryData (class ID #135)
+//	This ME collects PM data related to an IP host. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the IP host config data or IPv6 host
+//		config data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the IP host configuration data or
+//			IPv6 host configuration data ME. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Icmp Errors
+//			ICMP errors: This attribute counts ICMP errors received. (R) (mandatory) (4 bytes)
+//
+//		Dns Errors
+//			DNS errors:	This attribute counts DNS errors received. (R) (mandatory) (4 bytes)
+//
+//		Dhcp Timeouts
+//			DHCP timeouts:	This attribute counts DHCP timeouts. (R) (optional) (2 bytes)
+//
+//		Ip Address Conflict
+//			IP address conflict: This attribute is incremented whenever the ONU detects a conflicting IP
+//			address on the network. A conflicting IP address is one that has the same value as the one
+//			currently assigned to the ONU. (R) (optional) (2 bytes)
+//
+//		Out Of Memory
+//			Out of memory: This attribute is incremented whenever the ONU encounters an out of memory
+//			condition in the IP stack. (R) (optional) (2 bytes)
+//
+//		Internal Error
+//			Internal error: This attribute is incremented whenever the ONU encounters an internal error
+//			condition such as a driver interface failure in the IP stack. (R) (optional) (2 bytes)
+//
+type IpHostPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	iphostperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "IpHostPerformanceMonitoringHistoryData",
+		ClassID: 135,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("IcmpErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("DnsErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint16Field("DhcpTimeouts", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6: Uint16Field("IpAddressConflict", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7: Uint16Field("OutOfMemory", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8: Uint16Field("InternalError", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewIpHostPerformanceMonitoringHistoryData (class ID 135) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewIpHostPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(iphostperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeconfigurationdata.go b/vendor/github.com/cboling/omci/generated/macbridgeconfigurationdata.go
new file mode 100644
index 0000000..7e39613
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeconfigurationdata.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgeConfigurationDataClassId ClassID = ClassID(46)
+
+var macbridgeconfigurationdataBME *ManagedEntityDefinition
+
+// MacBridgeConfigurationData (class ID #46)
+//	This ME organizes status data associated with a MAC bridge. The ONU automatically creates or
+//	deletes an instance of this ME upon the creation or deletion of a MAC bridge service profile.
+//
+//	Relationships
+//		This ME is associated with one instance of a MAC bridge service profile.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge service profile. (R)
+//			(mandatory) (2 bytes)
+//
+//		Bridge Mac Address
+//			Bridge MAC address: This attribute indicates the MAC address used by the bridge. The ONU sets
+//			this attribute to a value based on criteria beyond the scope of this Recommendation, e.g.,
+//			factory settings. (R) (mandatory) (6 bytes)
+//
+//		Bridge Priority
+//			Bridge priority: This attribute reports the priority of the bridge. The ONU copies this
+//			attribute from the priority attribute of the associated MAC bridge service profile. The value of
+//			this attribute changes with updates to the MAC bridge service profile priority attribute. (R)
+//			(mandatory) (2 bytes)
+//
+//		Designated Root
+//			Designated root: This attribute identifies the bridge at the root of the spanning tree. It
+//			comprises bridge priority (2 bytes) and MAC address (6 bytes). (R) (mandatory) (8 bytes)
+//
+//		Root Path Cost
+//			Root path cost: This attribute reports the cost of the best path to the root as seen from this
+//			bridge. Upon ME instantiation, the ONU sets this attribute to 0. (R) (mandatory) (4 bytes)
+//
+//		Bridge Port Count
+//			Bridge port count: This attribute records the number of ports linked to this bridge. (R)
+//			(mandatory) (1 byte)
+//
+//		Root Port Num
+//			Root port num: This attribute contains the port number that has the lowest cost from the bridge
+//			to the root bridge. The value 0 means that this bridge is itself the root. Upon ME
+//			instantiation, the ONU sets this attribute to 0. (R) (mandatory) (2 bytes)
+//
+//		Hello Time
+//			NOTE – [IEEE 802.1D] specifies the compatibility range for hello time to be 1..2 s.
+//
+//		Forward Delay
+//			Forward delay: This attribute is the forwarding delay time received from the designated root (in
+//			256ths of a second). Its range is 0x0400 to 0x1E00 (4..30 s) in accordance with [IEEE 802.1D].
+//			(R) (optional) (2 bytes)
+//
+type MacBridgeConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgeConfigurationData",
+		ClassID: 46,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("BridgeMacAddress", 6, nil, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("BridgePriority", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: Uint64Field("DesignatedRoot", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("RootPathCost", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: ByteField("BridgePortCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint16Field("RootPortNum", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint16Field("HelloTime", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8: Uint16Field("ForwardDelay", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewMacBridgeConfigurationData (class ID 46) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewMacBridgeConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeconfigurationdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/macbridgeperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..96265e2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeperformancemonitoringhistorydata.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePerformanceMonitoringHistoryDataClassId ClassID = ClassID(51)
+
+var macbridgeperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// MacBridgePerformanceMonitoringHistoryData (class ID #51)
+//	This ME collects PM data associated with a MAC bridge. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		This ME is associated with an instance of a MAC bridge service profile.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge service profile. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Bridge Learning Entry Discard Count
+//			Bridge learning entry discard count: This attribute counts forwarding database entries that have
+//			been or would have been learned, but were discarded or replaced due to a lack of space in the
+//			database table. When used with the MAC learning depth attribute of the MAC bridge service
+//			profile, the bridge learning entry discard count may be particularly useful in detecting MAC
+//			spoofing attempts. (R) (mandatory) (4 bytes)
+//
+type MacBridgePerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePerformanceMonitoringHistoryData",
+		ClassID: 51,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("BridgeLearningEntryDiscardCount", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewMacBridgePerformanceMonitoringHistoryData (class ID 51) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewMacBridgePerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportbridgetabledata.go b/vendor/github.com/cboling/omci/generated/macbridgeportbridgetabledata.go
new file mode 100644
index 0000000..a632d59
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportbridgetabledata.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortBridgeTableDataClassId ClassID = ClassID(50)
+
+var macbridgeportbridgetabledataBME *ManagedEntityDefinition
+
+// MacBridgePortBridgeTableData (class ID #50)
+//	This ME reports status data associated with a bridge port. The ONU automatically creates or
+//	deletes an instance of this ME upon the creation or deletion of a MAC bridge port configuration
+//	data.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R) (mandatory) (2 bytes)
+//
+//		Bridge Table
+//			Upon ME instantiation, this attribute is an empty list. (R) (mandatory) (8 * M bytes, where M is
+//			the number of entries in the list.)
+//
+type MacBridgePortBridgeTableData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportbridgetabledataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortBridgeTableData",
+		ClassID: 50,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: TableField("BridgeTable", TableInfo{0, 8}, mapset.NewSetWith(Read), false, false, false, 1),
+		},
+	}
+}
+
+// NewMacBridgePortBridgeTableData (class ID 50) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewMacBridgePortBridgeTableData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportbridgetabledataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportconfigurationdata.go b/vendor/github.com/cboling/omci/generated/macbridgeportconfigurationdata.go
new file mode 100644
index 0000000..7054adb
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportconfigurationdata.go
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortConfigurationDataClassId ClassID = ClassID(47)
+
+var macbridgeportconfigurationdataBME *ManagedEntityDefinition
+
+// MacBridgePortConfigurationData (class ID #47)
+//	This ME models a port on a MAC bridge. Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is linked to an instance of the MAC bridge service profile. Additional
+//		bridge port control capabilities are provided by implicitly linked instances of some or all of:
+//		•	MAC bridge port filter table data;
+//		•	MAC bridge port filter pre-assign table;
+//		•	VLAN tagging filter data;
+//		•	Dot1 rate limiter.
+//		Real-time status of the bridge port is provided by implicitly linked instances of:
+//		•	MAC bridge port designation data;
+//		•	MAC bridge port bridge table data;
+//		•	Multicast subscriber monitor.
+//		Bridge port PM collection is provided by implicitly linked instances of:
+//		•	MAC bridge port PM history data;
+//		•	Ethernet frame PM history data upstream and downstream;
+//		•	Ethernet frame extended PM (preferred).
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Bridge Id Pointer
+//			Bridge ID pointer: This attribute points to an instance of the MAC bridge service profile.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Port Num
+//			Port num:	This attribute is the bridge port number. It must be unique among all ports associated
+//			with a particular MAC bridge service profile. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tp Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tp Pointer
+//			NOTE 1 – When the TP type is very high-speed digital subscriber line (VDSL) or xDSL, the two
+//			MSBs may be used to indicate a bearer channel.
+//
+//		Port Priority
+//			Port priority:	This attribute denotes the priority of the port for use in (rapid) spanning tree
+//			algorithms. The range is 0..255. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Port Path Cost
+//			Port path cost: This attribute specifies the contribution of the port to the path cost towards
+//			the spanning tree root bridge. The range is 1..65535. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Port Spanning Tree Ind
+//			Port spanning tree ind: The Boolean value true enables (R)STP LAN topology change detection at
+//			this port. The value false disables topology change detection. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Deprecated 1
+//			Deprecated 1: This attribute is not used. If present, it should be ignored by both the ONU and
+//			the OLT, except as necessary to comply with OMCI message definitions. (R, W, setbycreate)
+//			(optional) (1 byte)
+//
+//		Deprecated 2
+//			Deprecated 2: This attribute is not used. If present, it should be ignored by both the ONU and
+//			the OLT, except as necessary to comply with OMCI message definitions. (R, W, setbycreate)
+//			(optional) (1 byte)
+//
+//		Port Mac Address
+//			Port MAC address: If the TP associated with this port has a MAC address, this attribute
+//			specifies it. (R) (optional) (6 bytes)
+//
+//		Outbound Td Pointer
+//			Outbound TD pointer: This attribute points to a traffic descriptor that limits the traffic rate
+//			leaving the MAC bridge. (R, W) (optional) (2 bytes)
+//
+//		Inbound Td Pointer
+//			Inbound TD pointer: This attribute points to a traffic descriptor that limits the traffic rate
+//			entering the MAC bridge. (R, W) (optional) (2 bytes)
+//
+//		Mac Learning Depth
+//			NOTE 2 – If this attribute is not zero, its value overrides the value set in the MAC learning
+//			depth attribute of the MAC bridge service profile.
+//
+type MacBridgePortConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortConfigurationData",
+		ClassID: 47,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("BridgeIdPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  ByteField("PortNum", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("TpType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("TpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("PortPriority", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6:  Uint16Field("PortPathCost", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  ByteField("PortSpanningTreeInd", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("Deprecated1", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, true, 8),
+			9:  ByteField("Deprecated2", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, true, 9),
+			10: MultiByteField("PortMacAddress", 6, nil, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint16Field("OutboundTdPointer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: Uint16Field("InboundTdPointer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: ByteField("MacLearningDepth", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 13),
+		},
+	}
+}
+
+// NewMacBridgePortConfigurationData (class ID 47) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportconfigurationdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportdesignationdata.go b/vendor/github.com/cboling/omci/generated/macbridgeportdesignationdata.go
new file mode 100644
index 0000000..bfcf8b2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportdesignationdata.go
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortDesignationDataClassId ClassID = ClassID(48)
+
+var macbridgeportdesignationdataBME *ManagedEntityDefinition
+
+// MacBridgePortDesignationData (class ID #48)
+//	This ME records data associated with a bridge port. The ONU automatically creates or deletes an
+//	instance of this managed entity upon the creation or deletion of a MAC bridge port configuration
+//	data ME.
+//
+//	Relationships
+//		An instance of this managed entity is associated with one MAC bridge port configuration data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data. (R) (mandatory) (2 bytes)
+//
+//		Designated Bridge Root Cost Port
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R) (mandatory) (24 bytes)
+//
+//		Port State
+//			The value (R)stp_off is introduced to denote the port status where the (rapid) spanning tree
+//			protocol has been disabled by setting the port spanning tree ind attribute of the MAC bridge
+//			port configuration data to false, and the Ethernet link state is up. This value distinguishes
+//			whether frame forwarding is under the control of (R)STP.
+//
+type MacBridgePortDesignationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportdesignationdataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortDesignationData",
+		ClassID: 48,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("DesignatedBridgeRootCostPort", 24, nil, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("PortState", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+		},
+	}
+}
+
+// NewMacBridgePortDesignationData (class ID 48) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortDesignationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportdesignationdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportfilterpre-assigntable.go b/vendor/github.com/cboling/omci/generated/macbridgeportfilterpre-assigntable.go
new file mode 100644
index 0000000..2409ab0
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportfilterpre-assigntable.go
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortFilterPreAssignTableClassId ClassID = ClassID(79)
+
+var macbridgeportfilterpreassigntableBME *ManagedEntityDefinition
+
+// MacBridgePortFilterPreAssignTable (class ID #79)
+//	This ME provides an alternate approach to DA filtering from that supported through the MAC
+//	bridge port filter table data ME. This alternate approach is useful when all groups of addresses
+//	are stored beforehand in the ONU, and the MAC bridge port filter pre-assign table ME designates
+//	which groups are valid or invalid for filtering. On a circuit pack in which all groups of
+//	addresses are pre-assigned and stored locally, the ONU creates or deletes an instance of this ME
+//	automatically upon creation or deletion of a MAC bridge port configuration data ME.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R) (mandatory) (2 bytes)
+//
+//		Ipv4 Multicast Filtering
+//			IPv4 multicast filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Ipv6 Multicast Filtering
+//			IPv6 multicast filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Ipv4 Broadcast Filtering
+//			IPv4 broadcast filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Rarp Filtering
+//			RARP filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Ipx Filtering
+//			IPX filtering:		(R, W) (mandatory) (1 byte)
+//
+//		Netbeui Filtering
+//			NetBEUI filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Appletalk Filtering
+//			AppleTalk filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Bridge Management Information Filtering
+//			2	Addresses from 01.80.C2.00.00.20 to 01.80.C2.00.00.2F are used for generic attribute
+//			registration protocol (GARP) applications.
+//
+//		Arp Filtering
+//			ARP filtering:	(R, W) (mandatory) (1 byte)
+//
+//		Point_To_Point Protocol Over Ethernet Pppoe Broadcast Filtering
+//			Point-to-point protocol over Ethernet (PPPoE) broadcast filtering:	(R, W) (mandatory) (1 byte)
+//
+type MacBridgePortFilterPreAssignTable struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportfilterpreassigntableBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortFilterPreAssignTable",
+		ClassID: 79,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("Ipv4MulticastFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("Ipv6MulticastFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  ByteField("Ipv4BroadcastFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  ByteField("RarpFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  ByteField("IpxFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  ByteField("NetbeuiFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("AppletalkFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("BridgeManagementInformationFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("ArpFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: ByteField("PointToPointProtocolOverEthernetPppoeBroadcastFiltering", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+		},
+	}
+}
+
+// NewMacBridgePortFilterPreAssignTable (class ID 79) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortFilterPreAssignTable(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportfilterpreassigntableBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportfiltertabledata.go b/vendor/github.com/cboling/omci/generated/macbridgeportfiltertabledata.go
new file mode 100644
index 0000000..b542f48
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportfiltertabledata.go
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortFilterTableDataClassId ClassID = ClassID(49)
+
+var macbridgeportfiltertabledataBME *ManagedEntityDefinition
+
+// MacBridgePortFilterTableData (class ID #49)
+//	This ME organizes data associated with a bridge port. The ONU automatically creates or deletes
+//	an instance of this ME upon the creation or deletion of a MAC bridge port configuration data ME.
+//
+//	NOTE – The OLT should disable the learning mode in the MAC bridge service profile before writing
+//	to the MAC filter table.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R) (mandatory) (2 bytes)
+//
+//		Mac Filter Table
+//			(R, W) (Mandatory) (8N bytes, where N is the number of entries in the list)
+//
+type MacBridgePortFilterTableData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportfiltertabledataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortFilterTableData",
+		ClassID: 49,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: TableField("MacFilterTable", TableInfo{0, 8}, mapset.NewSetWith(Read, Write), false, false, false, 1),
+		},
+	}
+}
+
+// NewMacBridgePortFilterTableData (class ID 49) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortFilterTableData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportfiltertabledataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeporticmpv6processpre-assigntable.go b/vendor/github.com/cboling/omci/generated/macbridgeporticmpv6processpre-assigntable.go
new file mode 100644
index 0000000..83114d2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeporticmpv6processpre-assigntable.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortIcmpv6ProcessPreAssignTableClassId ClassID = ClassID(348)
+
+var macbridgeporticmpv6processpreassigntableBME *ManagedEntityDefinition
+
+// MacBridgePortIcmpv6ProcessPreAssignTable (class ID #348)
+//	This ME provides an approach to ICMPv6 message processing configuration to those ONUs that
+//	support IPv6 awareness. For every message, the MAC bridge port ICMPv6 process pre-assign table
+//	can designate a forward, discard or snoop operation. The ONU creates or deletes an instance of
+//	this ME automatically upon creation or deletion of a MAC bridge port configuration data ME.
+//
+//	The MAC bridge port ICMPv6 process pre-assign table ME filters layer 2 traffic between the UNI
+//	and ANI. The operation of this ME is completely independent of the operation and traffic
+//	generated or received by a possible IPv6 host config data ME.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R) (mandatory) (2 bytes)
+//
+//		Icmpv6 Error Messages Processing
+//			ICMPv6 error messages processing:	(R, W) (mandatory) (1 byte)
+//
+//		Icmpv6 Informational Messages Processing
+//			ICMPv6 informational messages processing:	(R, W) (mandatory) (1 byte)
+//
+//		Router Solicitation Processing
+//			Router solicitation processing:	(R, W) (mandatory) (1 byte)
+//
+//		Router Advertisement Processing
+//			Router advertisement processing:	(R, W) (mandatory) (1 byte)
+//
+//		Neighbour Solicitation Processing
+//			Neighbour solicitation processing:	(R, W) (mandatory) (1 byte)
+//
+//		Neighbour Advertisement Processing
+//			Neighbour advertisement processing:	(R, W) (mandatory) (1 byte)
+//
+//		Redirect Processing
+//			Redirect processing:	(R, W) (mandatory) (1 byte)
+//
+//		Multicast Listener Query Processing
+//			NOTE – If the ONU participates in multicast services, MLD queries should be controlled through
+//			the multicast operations profile ME. In such a case, it is strongly recommended not to provision
+//			the downstream direction of the multicast listener query processing attribute to any value other
+//			than forwarding.
+//
+//		Unknown Icmpv6 Processing
+//			Unknown ICMPv6 processing:	(R, W) (mandatory) (1 byte)
+//
+type MacBridgePortIcmpv6ProcessPreAssignTable struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeporticmpv6processpreassigntableBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortIcmpv6ProcessPreAssignTable",
+		ClassID: 348,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("Icmpv6ErrorMessagesProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: ByteField("Icmpv6InformationalMessagesProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: ByteField("RouterSolicitationProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: ByteField("RouterAdvertisementProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: ByteField("NeighbourSolicitationProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6: ByteField("NeighbourAdvertisementProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7: ByteField("RedirectProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8: ByteField("MulticastListenerQueryProcessing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9: ByteField("UnknownIcmpv6Processing", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+		},
+	}
+}
+
+// NewMacBridgePortIcmpv6ProcessPreAssignTable (class ID 348) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortIcmpv6ProcessPreAssignTable(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeporticmpv6processpreassigntableBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeportperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/macbridgeportperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..d1f43e8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeportperformancemonitoringhistorydata.go
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgePortPerformanceMonitoringHistoryDataClassId ClassID = ClassID(52)
+
+var macbridgeportperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// MacBridgePortPerformanceMonitoringHistoryData (class ID #52)
+//	This ME collects PM data associated with a MAC bridge port. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Forwarded Frame Counter
+//			Forwarded frame counter: This attribute counts frames transmitted successfully on this port. (R)
+//			(mandatory) (4 bytes)
+//
+//		Delay Exceeded Discard Counter
+//			Delay exceeded discard counter: This attribute counts frames discarded on this port because
+//			transmission was delayed. (R) (mandatory) (4 bytes)
+//
+//		Maximum Transmission Unit Mtu Exceeded Discard Counter
+//			Maximum transmission unit (MTU) exceeded discard counter: This attribute counts frames discarded
+//			on this port because the MTU was exceeded. (R) (mandatory) (4 bytes)
+//
+//		Received Frame Counter
+//			Received frame counter: This attribute counts frames received on this port. (R) (mandatory)
+//			(4 bytes)
+//
+//		Received And Discarded Counter
+//			Received and discarded counter: This attribute counts frames received on this port that were
+//			discarded due to errors. (R) (mandatory) (4 bytes)
+//
+type MacBridgePortPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeportperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "MacBridgePortPerformanceMonitoringHistoryData",
+		ClassID: 52,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("ForwardedFrameCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("DelayExceededDiscardCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("MaximumTransmissionUNitMtuExceededDiscardCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("ReceivedFrameCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("ReceivedAndDiscardedCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewMacBridgePortPerformanceMonitoringHistoryData (class ID 52) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgePortPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeportperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/macbridgeserviceprofile.go b/vendor/github.com/cboling/omci/generated/macbridgeserviceprofile.go
new file mode 100644
index 0000000..f9e8c6c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/macbridgeserviceprofile.go
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MacBridgeServiceProfileClassId ClassID = ClassID(45)
+
+var macbridgeserviceprofileBME *ManagedEntityDefinition
+
+// MacBridgeServiceProfile (class ID #45)
+//	This ME models a MAC bridge in its entirety; any number of ports may be associated with the
+//	bridge through pointers to the MAC bridge service profile ME. Instances of this ME are created
+//	and deleted by the OLT.
+//
+//	Relationships
+//		Bridge ports are modelled by MAC bridge port configuration data MEs, any number of which can
+//		point to a MAC bridge service profile. The real-time status of the bridge is available from an
+//		implicitly linked MAC bridge configuration data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The first byte
+//			is the slot ID. In an integrated ONU, this value is 0. The second byte is the bridge group ID.
+//			(R, setbycreate) (mandatory) (2 bytes)
+//
+//		Spanning Tree Ind
+//			Spanning tree ind: The Boolean value true specifies that a spanning tree algorithm is enabled.
+//			The value false disables (rapid) spanning tree. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Learning Ind
+//			Learning ind: The Boolean value true specifies that bridge learning functions are enabled. The
+//			value false disables bridge learning. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Port Bridging Ind
+//			Port bridging ind: The Boolean value true specifies that bridging between UNI ports is enabled.
+//			The value false disables local bridging. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Priority
+//			Priority:	This attribute specifies the bridge priority in the range 0..65535. The value of this
+//			attribute is copied to the bridge priority attribute of the associated MAC bridge configuration
+//			data ME. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Max Age
+//			Max age:	This attribute specifies the maximum age (in 256ths of a second) of received protocol
+//			information before its entry in the spanning tree listing is discarded. The range is 0x0600 to
+//			0x2800 (6..40 s) in accordance with [IEEE 802.1D]. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Hello Time
+//			NOTE – [IEEE 802.1D] specifies the compatibility range for hello time to be 1..2 s.
+//
+//		Forward Delay
+//			Forward delay: This attribute specifies the forwarding delay (in 256ths of a second) when the
+//			bridge acts as the root. The range is 0x0400 to 0x1E00 (4..30 s) in accordance with [IEEE
+//			802.1D]. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Unknown Mac Address Discard
+//			Unknown MAC address discard: The Boolean value true specifies that MAC frames with unknown DAs
+//			be discarded. The value false specifies that such frames be forwarded to all allowed ports.
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Mac Learning Depth
+//			MAC learning depth: This attribute specifies the maximum number of UNI MAC addresses to be
+//			learned by the bridge. The default value 0 specifies that there is no administratively imposed
+//			limit. (R, W, setbycreate) (optional) (1 byte)
+//
+//		Dynamic Filtering Ageing Time
+//			Dynamic filtering ageing time: This attribute specifies the age of dynamic filtering entries in
+//			the bridge database, after which unrefreshed entries are discarded. In accordance with clause
+//			7.9.2 of [IEEE 802.1D] and clause 8.8.3 of [IEEE 802.1Q], the range is 10..1 000 000 s, with a
+//			resolution of 1 s and a default of 300 s. The value 0 specifies that the ONU uses its internal
+//			default. (R, W, set-by-create) (optional) (4 bytes)
+//
+type MacBridgeServiceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	macbridgeserviceprofileBME = &ManagedEntityDefinition{
+		Name:    "MacBridgeServiceProfile",
+		ClassID: 45,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("SpanningTreeInd", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  ByteField("LearningInd", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("PortBridgingInd", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("Priority", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("MaxAge", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint16Field("HelloTime", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  Uint16Field("ForwardDelay", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("UnknownMacAddressDiscard", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("MacLearningDepth", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 9),
+			10: Uint32Field("DynamicFilteringAgeingTime", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 10),
+		},
+	}
+}
+
+// NewMacBridgeServiceProfile (class ID 45) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewMacBridgeServiceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(macbridgeserviceprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/managedentityme.go b/vendor/github.com/cboling/omci/generated/managedentityme.go
new file mode 100644
index 0000000..4f71571
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/managedentityme.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ManagedEntityMeClassId ClassID = ClassID(288)
+
+var managedentitymeBME *ManagedEntityDefinition
+
+// ManagedEntityMe (class ID #288)
+//	The ME describes the details of each ME that is supported by the ONU. This ME is not included in
+//	an MIB upload.
+//
+//	Relationships
+//		One or more MEs are related to the OMCI object entity.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Its value is
+//			equal to the ME type value, and is the same as the code found in the ME type table attribute of
+//			the OMCI ME and Table 11.2.41. (R) (mandatory) (2 bytes)
+//
+//		Name
+//			Name:	This attribute contains a 25 byte ASCII coded mnemonic tag for the ME type. Strings
+//			shorter than 25 bytes are padded with null characters. (R) (mandatory) (25 bytes)
+//
+//		Attributes Table
+//			NOTE – The ME ID attribute is not included in the list, since the type of this attribute is
+//			fixed.
+//
+//		Access
+//			(R) (mandatory) (1 byte)
+//
+//		Alarms Table
+//			Alarms table: This attribute lists the alarm codes that are supported. (R) (mandatory) (Y bytes,
+//			where Y is the number of entries in the table.)
+//
+//		Avcs Table
+//			AVCs table:	This attribute lists the AVCs that are supported. (R) (mandatory) (Z bytes, where Z
+//			is the number of entries in the table.)
+//
+//		Actions
+//			Actions:	This attribute lists the action codes supported on this object, formatted as a bit map.
+//			The action codes are the MTs from Table 11.2.2-1. The LSB represents action 0, and so on. (R)
+//			(mandatory) (4 bytes)
+//
+//		Instances Table
+//			Instances table: This attribute is a list of pointers to all instances of this ME. (R)
+//			(mandatory) (2 * V bytes, where V is the number of entries in the table.)
+//
+//		Support
+//			(R) (mandatory) (1 byte)
+//
+type ManagedEntityMe struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	managedentitymeBME = &ManagedEntityDefinition{
+		Name:    "ManagedEntityMe",
+		ClassID: 288,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("Name", 25, nil, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: TableField("AttributesTable", TableInfo{0, 1}, mapset.NewSetWith(Read), false, false, false, 2),
+			3: ByteField("Access", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: TableField("AlarmsTable", TableInfo{0, 1}, mapset.NewSetWith(Read), false, false, false, 4),
+			5: TableField("AvcsTable", TableInfo{0, 1}, mapset.NewSetWith(Read), false, false, false, 5),
+			6: Uint32Field("Actions", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: TableField("InstancesTable", TableInfo{0, 1}, mapset.NewSetWith(Read), false, false, false, 7),
+			8: ByteField("Support", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewManagedEntityMe (class ID 288) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewManagedEntityMe(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(managedentitymeBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/me.go b/vendor/github.com/cboling/omci/generated/me.go
new file mode 100644
index 0000000..131522f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/me.go
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/deckarep/golang-set"
+	"github.com/google/gopacket"
+)
+
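+// ManagedEntity represents a single instance of an OMCI managed entity: the
+// definition of its class together with the attribute values (and attribute
+// mask) carried for that instance.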
+type ManagedEntity struct {
+	definition    *ManagedEntityDefinition
+	attributeMask uint16
+	attributes    AttributeValueMap
+}
+
+// String provides a simple string that describes this struct
+func (entity *ManagedEntity) String() string {
+	return fmt.Sprintf("ManagedEntity: %v, EntityID: (%d/%#x): Attributes: %v",
+		entity.GetClassID(), entity.GetEntityID(), entity.GetEntityID(), entity.attributes)
+}
+
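+// NewManagedEntity creates a ManagedEntity from the given definition, optionally
+// initializing its entity ID and other attributes from the supplied ParamData.
+// A minimal usage sketch (the entity ID and attribute value below are
+// illustrative only):
+//
+//	me, omciErr := NewManagedEntity(macbridgeserviceprofileBME, ParamData{EntityID: 0x0201})
+//	if omciErr == nil {
+//		omciErr = me.SetAttribute("Priority", uint16(0x8000))
+//	}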
+func NewManagedEntity(definition *ManagedEntityDefinition, params ...ParamData) (*ManagedEntity, OmciErrors) {
+	entity := &ManagedEntity{
+		definition: definition,
+		attributes: make(map[string]interface{}),
+	}
+	if params != nil {
+		err := entity.setAttributes(params...)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return entity, nil
+}
+
+func (entity *ManagedEntity) GetManagedEntityDefinition() *ManagedEntityDefinition {
+	return entity.definition
+}
+
+func (entity *ManagedEntity) SetManagedEntityDefinition(def *ManagedEntityDefinition) {
+	entity.definition = def
+}
+
+func (entity *ManagedEntity) GetName() string {
+	return entity.definition.GetName()
+}
+
+func (entity *ManagedEntity) GetClassID() ClassID {
+	return entity.definition.GetClassID()
+}
+
+func (entity *ManagedEntity) GetMessageTypes() mapset.Set {
+	return entity.definition.GetMessageTypes()
+}
+
+func (entity *ManagedEntity) GetAllowedAttributeMask() uint16 {
+	return entity.definition.GetAllowedAttributeMask()
+}
+
+func (entity *ManagedEntity) GetAttributeDefinitions() *AttributeDefinitionMap {
+	return entity.definition.GetAttributeDefinitions()
+}
+
+func (entity *ManagedEntity) DecodeAttributes(mask uint16, data []byte, p gopacket.PacketBuilder, msgType byte) (AttributeValueMap, error) {
+	return entity.definition.DecodeAttributes(mask, data, p, msgType)
+}
+
+func (entity *ManagedEntity) SerializeAttributes(attr AttributeValueMap, mask uint16,
+	b gopacket.SerializeBuffer, msgType byte, bytesAvailable int) error {
+	return entity.definition.SerializeAttributes(attr, mask, b, msgType, bytesAvailable)
+}
+
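+// GetEntityID returns the value of the ManagedEntityId attribute (attribute
+// index 0), or 0 if it has not been set.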
+func (entity *ManagedEntity) GetEntityID() uint16 {
+	if eid, err := entity.GetAttributeByIndex(0); err == nil {
+		return eid.(uint16)
+	}
+	return 0
+}
+
+func (entity *ManagedEntity) SetEntityID(eid uint16) error {
+	return entity.SetAttributeByIndex(0, eid)
+}
+
+func (entity *ManagedEntity) GetAttributeMask() uint16 {
+	return entity.attributeMask
+}
+
+func (entity *ManagedEntity) GetAttributeValueMap() *AttributeValueMap {
+	return &entity.attributes
+}
+
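+// GetAttribute returns the current value of the named attribute, or an error
+// if no such attribute value has been set on this entity.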
+func (entity *ManagedEntity) GetAttribute(name string) (interface{}, error) {
+	value, ok := entity.attributes[name]
+	if !ok {
+		return 0, errors.New(fmt.Sprintf("attribute '%v' not found", name))
+	}
+	return value, nil
+}
+
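+// GetAttributeByIndex returns the value of the attribute at the given
+// definition index, or an error if the index is invalid or no attributes have
+// been set.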
+func (entity *ManagedEntity) GetAttributeByIndex(index uint) (interface{}, error) {
+	if len(entity.attributes) == 0 {
+		return nil, errors.New("no attributes have been set")
+	}
+	if _, ok := entity.definition.AttributeDefinitions[index]; !ok {
+		return nil, errors.New(fmt.Sprintf("invalid attribute index: %d, should be 0..%d",
+			index, len(entity.definition.AttributeDefinitions)-1))
+	}
+	return entity.GetAttribute(entity.definition.AttributeDefinitions[index].Name)
+}
+
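+// setAttributes stores the entity ID and any additional attribute values
+// supplied in params, failing if attributes have already been set on this entity.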
+func (entity *ManagedEntity) setAttributes(params ...ParamData) OmciErrors {
+	if entity.attributes == nil {
+		entity.attributes = make(map[string]interface{})
+	} else if len(entity.attributes) > 0 {
+		return NewNonStatusError("attributes have already been set")
+	}
+	eidName := entity.definition.AttributeDefinitions[0].Name
+	if len(params) == 0 {
+		entity.attributes[eidName] = uint16(0)
+		return nil
+	}
+	entity.attributes[eidName] = params[0].EntityID
+
+	for name, value := range params[0].Attributes {
+		if name == eidName {
+			continue
+		}
+		if err := entity.SetAttribute(name, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
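+// SetAttribute sets the named attribute value, running any constraint check
+// defined for that attribute and updating the entity's attribute mask.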
+func (entity *ManagedEntity) SetAttribute(name string, value interface{}) OmciErrors {
+	attrDef, err := GetAttributeDefinitionByName(entity.definition.GetAttributeDefinitions(), name)
+	if err != nil {
+		return err
+	} else if entity.attributes == nil {
+		entity.attributes = make(map[string]interface{})
+	}
+	mask := uint16(1 << (16 - attrDef.GetIndex()))
+	// check any constraints
+	if constraintCheck := attrDef.GetConstraints(); constraintCheck != nil {
+		err = constraintCheck(value)
+		if err != nil {
+			return NewParameterError(mask, err)
+		}
+	}
+	entity.attributes[name] = value
+	entity.attributeMask |= mask
+	return nil
+}
+
+func (entity *ManagedEntity) SetAttributeByIndex(index uint, value interface{}) error {
+	attrDef, ok := entity.definition.AttributeDefinitions[index]
+	if !ok {
+		return errors.New(fmt.Sprintf("invalid attribute index: %d, should be 0..%d",
+			index, len(entity.definition.AttributeDefinitions)-1))
+	} else if entity.attributes == nil {
+		entity.attributes = make(map[string]interface{})
+	}
+	mask := uint16(1 << (16 - attrDef.GetIndex()))
+	// check any constraints
+	if constraintCheck := attrDef.GetConstraints(); constraintCheck != nil {
+		err := constraintCheck(value)
+		if err != nil {
+			return NewParameterError(mask, err)
+		}
+	}
+	entity.attributes[attrDef.Name] = value
+	entity.attributeMask |= mask
+	return nil
+}
+
+func (entity *ManagedEntity) DeleteAttribute(name string) error {
+	attrDef, err := GetAttributeDefinitionByName(entity.definition.GetAttributeDefinitions(), name)
+	if err != nil {
+		return err
+	}
+	if entity.attributes != nil {
+		delete(entity.attributes, name)
+		entity.attributeMask &= ^uint16(1 << (16 - attrDef.GetIndex()))
+	}
+	return nil
+}
+
+func (entity *ManagedEntity) DeleteAttributeByIndex(index uint) error {
+	attrDef, ok := entity.definition.AttributeDefinitions[index]
+	if !ok {
+		return errors.New(fmt.Sprintf("invalid attribute index: %d, should be 0..%d",
+			index, len(entity.definition.AttributeDefinitions)-1))
+	}
+	if entity.attributes != nil {
+		delete(entity.attributes, attrDef.Name)
+		entity.attributeMask &= ^uint16(1 << (16 - attrDef.GetIndex()))
+	}
+	return nil
+}
+
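+// DecodeFromBytes decodes a managed entity from its wire form: class ID
+// (2 bytes), entity ID (2 bytes) and attribute mask (2 bytes), followed by the
+// attribute values selected by the mask.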
+func (entity *ManagedEntity) DecodeFromBytes(data []byte, p gopacket.PacketBuilder, msgType byte) error {
+	if len(data) < 6 {
+		p.SetTruncated()
+		return errors.New("frame too small")
+	}
+	classID := ClassID(binary.BigEndian.Uint16(data[0:2]))
+	entityID := binary.BigEndian.Uint16(data[2:4])
+	parameters := ParamData{EntityID: entityID}
+
+	meDefinition, omciErr := LoadManagedEntityDefinition(classID, parameters)
+	if omciErr != nil {
+		return omciErr.GetError()
+	}
+	entity.definition = meDefinition.definition
+	entity.attributeMask = binary.BigEndian.Uint16(data[4:6])
+	entity.attributes = make(map[string]interface{})
+	entity.SetEntityID(entityID)
+	packetAttributes, err := entity.DecodeAttributes(entity.GetAttributeMask(), data[6:], p, msgType)
+	if err != nil {
+		return err
+	}
+	for name, value := range packetAttributes {
+		entity.attributes[name] = value
+	}
+	return nil
+}
+
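+// SerializeTo appends the class ID, entity ID, attribute mask and the
+// attribute values selected by the mask to the serialization buffer.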
+func (entity *ManagedEntity) SerializeTo(b gopacket.SerializeBuffer, msgType byte, bytesAvailable int) error {
+	// Add class ID and entity ID
+	bytes, err := b.AppendBytes(6)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(entity.GetClassID()))
+	binary.BigEndian.PutUint16(bytes[2:], entity.GetEntityID())
+	binary.BigEndian.PutUint16(bytes[4:], entity.GetAttributeMask())
+
+	// TODO: Need to limit number of bytes appended to not exceed packet size
+	// Is there space/metadata info in 'b' parameter to allow this?
+	err = entity.SerializeAttributes(entity.attributes, entity.GetAttributeMask(), b, msgType, bytesAvailable)
+	return err
+}
diff --git a/vendor/github.com/cboling/omci/generated/medef.go b/vendor/github.com/cboling/omci/generated/medef.go
new file mode 100644
index 0000000..2c76614
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/medef.go
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import (
+	"errors"
+	"fmt"
+	"github.com/deckarep/golang-set"
+	"github.com/google/gopacket"
+	"math/bits"
+)
+
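+// ManagedEntityDefinition describes an OMCI managed entity class: its name,
+// class ID, supported message types, allowed attribute mask and per-attribute
+// definitions.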
+type ManagedEntityDefinition struct {
+	Name         string
+	ClassID      ClassID
+	MessageTypes mapset.Set // Mandatory
+	// TODO: Support Optional Message types
+	AllowedAttributeMask uint16
+	AttributeDefinitions AttributeDefinitionMap
+}
+
+func (bme *ManagedEntityDefinition) String() string {
+	return fmt.Sprintf("Definition: %s: CID: %v, Attributes: %v",
+		bme.Name, bme.ClassID, bme.AttributeDefinitions)
+}
+
+func (bme *ManagedEntityDefinition) GetName() string {
+	return bme.Name
+}
+func (bme *ManagedEntityDefinition) GetClassID() ClassID {
+	return bme.ClassID
+}
+func (bme *ManagedEntityDefinition) GetMessageTypes() mapset.Set {
+	return bme.MessageTypes
+}
+func (bme *ManagedEntityDefinition) GetAllowedAttributeMask() uint16 {
+	return bme.AllowedAttributeMask
+}
+func (bme *ManagedEntityDefinition) GetAttributeDefinitions() *AttributeDefinitionMap {
+	return &bme.AttributeDefinitions
+}
+
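+// DecodeAttributes decodes the attribute values selected by mask from data,
+// rejecting any mask that selects attributes not defined for this managed entity.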
+func (bme *ManagedEntityDefinition) DecodeAttributes(mask uint16, data []byte, p gopacket.PacketBuilder, msgType byte) (AttributeValueMap, error) {
+	if (mask | bme.GetAllowedAttributeMask()) != bme.GetAllowedAttributeMask() {
+		// TODO: Provide custom error code so a response 'result' can properly be coded
+		return nil, errors.New("unsupported attribute mask")
+	}
+	keyList := GetAttributeDefinitionMapKeys(bme.AttributeDefinitions)
+
+	attrMap := make(AttributeValueMap, bits.OnesCount16(mask))
+	for _, index := range keyList {
+		if index == 0 {
+			continue // Skip Entity ID
+		}
+		attrDef := bme.AttributeDefinitions[index]
+		name := attrDef.GetName()
+
+		if mask&(1<<(16-uint(index))) != 0 {
+			value, err := attrDef.Decode(data, p, msgType)
+			if err != nil {
+				return nil, err
+			}
+			if attrDef.IsTableAttribute() {
+				switch msgType {
+				default:
+					return nil, errors.New(fmt.Sprintf("unsupported Message Type '%v' for table serialization", msgType))
+
+				case byte(Get) | AK: // Get Response
+					attrMap[name] = value
+					data = data[4:]
+
+				case byte(GetNext) | AK: // Get Next Response
+					// Value is a partial octet buffer we need to collect and at
+					// the end (last segment) pull it up into more appropriate table
+					// rows
+					valueBuffer, ok := value.([]byte)
+					if !ok {
+						panic("unexpected type already returned as get-next-response attribute data")
+					}
+					if existing, found := attrMap[name]; found {
+						prev, ok := existing.([]byte)
+						if !ok {
+							panic("unexpected type already in attribute value map")
+						}
+						attrMap[name] = append(prev, valueBuffer...)
+					} else {
+						attrMap[name] = valueBuffer
+					}
+					if size := attrDef.GetSize(); size != 0 && size > len(valueBuffer) {
+						panic("unexpected size difference")
+					}
+					data = data[len(valueBuffer):]
+
+				case byte(Set) | AR: // Set Request
+					fmt.Println("TODO")
+
+				case byte(SetTable) | AR: // Set Table Request
+					// TODO: Only baseline supported at this time
+					return nil, errors.New("attribute encode for set-table-request not yet supported")
+				}
+			} else {
+				attrMap[name] = value
+				data = data[attrDef.GetSize():]
+			}
+		}
+	}
+	return attrMap, nil
+}
+
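+// SerializeAttributes encodes the attribute values selected by mask into the
+// serialization buffer, decrementing bytesAvailable as each attribute is written.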
+func (bme *ManagedEntityDefinition) SerializeAttributes(attr AttributeValueMap, mask uint16,
+	b gopacket.SerializeBuffer, msgType byte, bytesAvailable int) error {
+	if (mask | bme.GetAllowedAttributeMask()) != bme.GetAllowedAttributeMask() {
+		// TODO: Provide custom error code so a response 'result' can properly be coded
+		return errors.New("unsupported attribute mask")
+	}
+	// TODO: Need to limit number of bytes appended to not exceed packet size
+	// Is there space/metadata info in 'b' parameter to allow this?
+	keyList := GetAttributeDefinitionMapKeys(bme.AttributeDefinitions)
+
+	for _, index := range keyList {
+		if index == 0 {
+			continue // Skip Entity ID
+		}
+		attrDef := bme.AttributeDefinitions[index]
+
+		if mask&(1<<(16-uint(index))) != 0 {
+			value, ok := attr[attrDef.GetName()]
+			if !ok {
+				msg := fmt.Sprintf("attribute not found: '%v'", attrDef.GetName())
+				return errors.New(msg)
+			}
+			size, err := attrDef.SerializeTo(value, b, msgType, bytesAvailable)
+			if err != nil {
+				return err
+			}
+			bytesAvailable -= size
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/cboling/omci/generated/mgcconfigdata.go b/vendor/github.com/cboling/omci/generated/mgcconfigdata.go
new file mode 100644
index 0000000..60b47bb
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/mgcconfigdata.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MgcConfigDataClassId ClassID = ClassID(155)
+
+var mgcconfigdataBME *ManagedEntityDefinition
+
+// MgcConfigData (class ID #155)
+//	The MGC config data ME defines the MGC configuration associated with an MG subscriber. It is
+//	conditionally required for ONUs that support ITU-T H.248 VoIP services. If a non-OMCI interface
+//	is used to manage VoIP signalling, this ME is unnecessary.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with one or more VoIP voice CTP MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Primary Mgc
+//			Primary MGC: This attribute points to a network address ME that contains the name (IP address or
+//			resolved name) of the primary MGC that controls the signalling messages. The port is optional
+//			and defaults to 2944 for text message formats and 2945 for binary message formats. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Secondary Mgc
+//			Secondary MGC: This attribute points to a network address ME that contains the name (IP address
+//			or resolved name) of the secondary or backup MGC that controls the signalling messages. The port
+//			is optional and defaults to 2944 for text message formats and 2945 for binary message formats.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tcp_Udp Pointer
+//			TCP/UDP pointer: This attribute points to the TCP/UDP config data ME to be used for
+//			communication with the MGC. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Version
+//			Version:	This integer attribute reports the version of the Megaco protocol in use. The ONU
+//			should deny an attempt by the OLT to set or create a value that it does not support. The value 0
+//			indicates that no particular version is specified. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Message Format
+//			The default value is recommended to be 0. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Maximum Retry Time
+//			Maximum retry time: This attribute specifies the maximum retry time for MGC transactions, in
+//			seconds. The default value 0 specifies vendor-specific implementation. (R, W) (optional)
+//			(2 bytes)
+//
+//		Maximum Retry Attempts
+//			Maximum retry attempts: This attribute specifies the maximum number of times a message is
+//			retransmitted to the MGC. The recommended default value 0 specifies vendor-specific
+//			implementation. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Service Change Delay
+//			Service change delay: This attribute specifies the service status delay time for changes in line
+//			service status. This attribute is specified in seconds. The default value 0 specifies no delay.
+//			(R, W) (optional) (2 bytes)
+//
+//		Termination Id Base
+//			Termination ID base: This attribute specifies the base string for the ITU-T H.248 physical
+//			termination ID(s) for this ONU. This string is intended to uniquely identify an ONU. Vendor-
+//			specific termination identifiers (port IDs) are optionally added to this string to uniquely
+//			identify a termination on a specific ONU. (R, W) (optional) (25 bytes)
+//
+//		Softswitch
+//			Softswitch:	This attribute identifies the gateway softswitch vendor. The format is four ASCII
+//			coded alphabetic characters [A..Z] as defined in [ATIS0300220]. A value of four null bytes
+//			indicates an unknown or unspecified vendor. (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Message Id Pointer
+//			Message ID pointer: This attribute points to a large string whose value specifies the message
+//			identifier string for ITU-T H.248 messages originated by the ONU. (R, W, setbycreate) (optional)
+//			(2 bytes)
+//
+type MgcConfigData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	mgcconfigdataBME = &ManagedEntityDefinition{
+		Name:    "MgcConfigData",
+		ClassID: 155,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("PrimaryMgc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("SecondaryMgc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("TcpUdpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  ByteField("Version", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  ByteField("MessageFormat", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint16Field("MaximumRetryTime", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  Uint16Field("MaximumRetryAttempts", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8:  Uint16Field("ServiceChangeDelay", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  MultiByteField("TerminationIdBase", 25, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: Uint32Field("Softswitch", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: Uint16Field("MessageIdPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 11),
+		},
+	}
+}
+
+// NewMgcConfigData (class ID 155) creates the basic Managed Entity definition
+// that is used to validate an ME of this type that is received from or
+// about to be sent on the wire.
+func NewMgcConfigData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(mgcconfigdataBME, params...)
+}
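The AllowedAttributeMask value 0XFFE0 above encodes which attribute slots this ME defines: by the usual OMCI convention, attribute 1 occupies the most significant bit of the 16-bit mask, so eleven attributes give 0xFFE0. A self-contained sketch of that relationship (illustration only, not part of the generated package):

package main

import "fmt"

// attrMask builds a 16-bit OMCI attribute mask covering attributes 1..n,
// with attribute 1 mapped to the most significant bit.
func attrMask(n int) uint16 {
	var mask uint16
	for i := 1; i <= n && i <= 16; i++ {
		mask |= 1 << uint(16-i)
	}
	return mask
}

func main() {
	fmt.Printf("%#04X\n", attrMask(11)) // 0XFFE0, as in MgcConfigData above
	fmt.Printf("%#04X\n", attrMask(16)) // 0XFFFF, an ME with all 16 attributes defined
}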
diff --git a/vendor/github.com/cboling/omci/generated/mgcperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/mgcperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..dd55c82
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/mgcperformancemonitoringhistorydata.go
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MgcPerformanceMonitoringHistoryDataClassId ClassID = ClassID(156)
+
+var mgcperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// MgcPerformanceMonitoringHistoryData (class ID #156)
+//	The MGC monitoring data ME provides run-time statistics for an active MGC association. Instances
+//	of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the MGC config data or MGC config
+//		portal ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the associated MGC config data or
+//			to the MGC config portal ME. If a non-OMCI configuration method is used for VoIP, there can be
+//			only one live ME instance, associated with the MGC config portal, and with ME ID 0. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Received Messages
+//			Received messages: This attribute counts the number of received Megaco messages on this
+//			association, as defined by [ITUT H.341]. (R) (mandatory) (4 bytes)
+//
+//		Received Octets
+//			Received octets: This attribute counts the total number of octets received on this association,
+//			as defined by [ITU-T H.341]. (R) (mandatory) (4 bytes)
+//
+//		Sent Messages
+//			Sent messages: This attribute counts the total number of Megaco messages sent over this
+//			association, as defined by [ITU-T H.341]. (R) (mandatory) (4 bytes)
+//
+//		Sent Octets
+//			Sent octets:	This attribute counts the total number of octets sent over this association, as
+//			defined by [ITU-T H.341]. (R) (mandatory) (4 bytes)
+//
+//		Protocol Errors
+//			(R) (mandatory) (4 bytes)
+//
+//		Transport Losses
+//			Transport losses: This attribute counts the total number of transport losses (e.g., socket
+//			problems) detected on this association. A link loss is defined as loss of communication with the
+//			remote entity due to hardware/transient problems, or problems in related software. (R)
+//			(mandatory) (4 bytes)
+//
+//		Last Detected Event
+//			(R) (mandatory) (1 byte)
+//
+//		Last Detected Event Time
+//			Last detected event time: This attribute reports the time in seconds since the last event on
+//			this association was detected, as defined by [ITU-T H.341]. (R) (mandatory) (4 bytes)
+//
+//		Last Detected Reset Time
+//			Last detected reset time: This attribute reports the time in seconds since these statistics were
+//			last reset, as defined by [ITU-T H.341]. Under normal circumstances, a get action on this
+//			attribute would return 900 s to indicate a completed 15 min interval. (R) (mandatory) (4 bytes)
+//
+type MgcPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	mgcperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "MgcPerformanceMonitoringHistoryData",
+		ClassID: 156,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("ReceivedMessages", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("ReceivedOctets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("SentMessages", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("SentOctets", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("ProtocolErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TransportLosses", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  ByteField("LastDetectedEvent", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("LastDetectedEventTime", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("LastDetectedResetTime", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+		},
+	}
+}
+
+// NewMgcPerformanceMonitoringHistoryData (class ID 156) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMgcPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(mgcperformancemonitoringhistorydataBME, params...)
+}
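As the Managed Entity ID description notes, this PM ME is linked to its MGC config data (or MGC config portal) solely by carrying the same ME ID. A hypothetical usage sketch creating both with one ID; the ParamData literal assumes an EntityID field, which is defined elsewhere in this package and is not shown in this diff:

	cfg, cfgErr := NewMgcConfigData(ParamData{EntityID: 1})                     // assumed field name
	pm, pmErr := NewMgcPerformanceMonitoringHistoryData(ParamData{EntityID: 1}) // same ID implies the implicit link
	_, _, _, _ = cfg, cfgErr, pm, pmErr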
diff --git a/vendor/github.com/cboling/omci/generated/mocaethernetperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/mocaethernetperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..53fd742
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/mocaethernetperformancemonitoringhistorydata.go
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MocaEthernetPerformanceMonitoringHistoryDataClassId ClassID = ClassID(163)
+
+var mocaethernetperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// MocaEthernetPerformanceMonitoringHistoryData (class ID #163)
+//	This ME collects PM data for an MoCA Ethernet interface. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PPTP MoCA UNI ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP MoCA UNI. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Incoming Unicast Packets
+//			Incoming unicast packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Discarded Packets
+//			Incoming discarded packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Errored Packets
+//			Incoming errored packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Unknown Packets
+//			Incoming unknown packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Multicast Packets
+//			Incoming multicast packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Broadcast Packets
+//			Incoming broadcast packets:	(R) (optional) (4 bytes)
+//
+//		Incoming Octets
+//			Incoming octets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Unicast Packets
+//			Outgoing unicast packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Discarded Packets
+//			Outgoing discarded packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Errored Packets
+//			Outgoing errored packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Unknown Packets
+//			Outgoing unknown packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Multicast Packets
+//			Outgoing multicast packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Broadcast Packets
+//			Outgoing broadcast packets:	(R) (optional) (4 bytes)
+//
+//		Outgoing Octets
+//			Outgoing octets:	(R) (optional) (4 bytes)
+//
+type MocaEthernetPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	mocaethernetperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "MocaEthernetPerformanceMonitoringHistoryData",
+		ClassID: 163,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("IncomingUnicastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  Uint32Field("IncomingDiscardedPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5:  Uint32Field("IncomingErroredPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  Uint32Field("IncomingUnknownPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7:  Uint32Field("IncomingMulticastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8:  Uint32Field("IncomingBroadcastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint32Field("IncomingOctets", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: Uint32Field("OutgoingUnicastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint32Field("OutgoingDiscardedPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: Uint32Field("OutgoingErroredPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+			13: Uint32Field("OutgoingUnknownPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint32Field("OutgoingMulticastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 14),
+			15: Uint32Field("OutgoingBroadcastPackets", 0, mapset.NewSetWith(Read), false, false, true, false, 15),
+			16: Uint32Field("OutgoingOctets", 0, mapset.NewSetWith(Read), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewMocaEthernetPerformanceMonitoringHistoryData (class ID 163) creates the
+// basic Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMocaEthernetPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(mocaethernetperformancemonitoringhistorydataBME, params...)
+}
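All sixteen attributes are defined here (mask 0XFFFF), yet every counter is optional, so the attribute mask echoed in a get response tells the OLT which counters the ONU actually returned. A small helper for testing a single bit, assuming the usual OMCI convention that attribute 1 occupies the most significant bit:

// attrPresent reports whether attribute number k (1..16) is set in a
// 16-bit OMCI attribute mask (attribute 1 = most significant bit).
func attrPresent(mask uint16, k int) bool {
	return k >= 1 && k <= 16 && mask&(1<<uint(16-k)) != 0
}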
diff --git a/vendor/github.com/cboling/omci/generated/mplspseudowireterminationpoint.go b/vendor/github.com/cboling/omci/generated/mplspseudowireterminationpoint.go
new file mode 100644
index 0000000..25f3305
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/mplspseudowireterminationpoint.go
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MplsPseudowireTerminationPointClassId ClassID = ClassID(333)
+
+var mplspseudowireterminationpointBME *ManagedEntityDefinition
+
+// MplsPseudowireTerminationPoint (class ID #333)
+//	This ME contains the configuration data of a pseudowire whose underlying transport method is
+//	MPLS. Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		Zero or one instance of this ME is associated with each instance of the pseudowire TP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Tp Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tp Pointer
+//			TP pointer:	This attribute points to the instance of the TP associated with this MPLS PW TP. The
+//			type of the associated TP is determined by the TP type attribute. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Mpls Label Indicator
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Mpls Pw Direction
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Mpls Pw Uplink Label
+//			MPLS PW uplink label: This attribute specifies the label of the inner MPLS pseudowire upstream.
+//			The attribute is not meaningful for unidirectional downstream PWs. (R, W, setbycreate)
+//			(mandatory) (4 bytes)
+//
+//		Mpls Pw Downlink Label
+//			MPLS PW downlink label: This attribute specifies the label of the inner MPLS pseudowire
+//			downstream. The attribute is not meaningful for unidirectional upstream PWs. (R, W, setbycreate)
+//			(mandatory) (4 bytes)
+//
+//		Mpls Pw Tc
+//			NOTE 1 – The TC field was previously known as EXP. Refer to [b-IETF RFC 5462].
+//
+//		Mpls Tunnel Direction
+//			(R, W, setbycreate) (mandatory for double-labelled case) (1 byte)
+//
+//		Mpls Tunnel Uplink Label
+//			MPLS tunnel uplink label: This attribute specifies the (outer) label for the upstream MPLS
+//			tunnel. If the MPLS tunnel is downstream only, this attribute should be set to 0. (R, W,
+//			setbycreate) (mandatory for double-labelled case) (4 bytes)
+//
+//		Mpls Tunnel Downlink Label
+//			MPLS tunnel downlink label: This attribute specifies the (outer) label for the downstream MPLS
+//			tunnel. If the MPLS tunnel is upstream only, this attribute should be set to 0. (R, W,
+//			setbycreate) (mandatory for double-labelled case) (4 bytes)
+//
+//		Mpls Tunnel Tc
+//			NOTE 2 – The TC field was previously known as EXP. Refer to [b-IETF RFC 5462].
+//
+//		Pseudowire Type
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Pseudowire Control Word Preference
+//			Pseudowire control word preference: When set to true, this Boolean attribute specifies that a
+//			control word is to be sent with each packet. Some PW types mandate the use of a control word in
+//			any event. In such cases, the value configured for this attribute has no effect on the presence
+//			of the control word. (R, W, setbycreate) (optional) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by the
+//			MPLS pseudowire TP. Administrative state is further described in clause A.1.6. (R, W) (optional)
+//			(1 byte)
+//
+//		Operational State
+//			Operational state: This attribute reports whether the ME is currently capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+type MplsPseudowireTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	mplspseudowireterminationpointBME = &ManagedEntityDefinition{
+		Name:    "MplsPseudowireTerminationPoint",
+		ClassID: 333,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("TpType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("TpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("MplsLabelIndicator", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  ByteField("MplsPwDirection", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint32Field("MplsPwUplinkLabel", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint32Field("MplsPwDownlinkLabel", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  ByteField("MplsPwTc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("MplsTunnelDirection", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  Uint32Field("MplsTunnelUplinkLabel", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: Uint32Field("MplsTunnelDownlinkLabel", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: ByteField("MplsTunnelTc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 11),
+			12: Uint16Field("PseudowireType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 12),
+			13: ByteField("PseudowireControlWordPreference", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 13),
+			14: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 15),
+		},
+	}
+}
+
+// NewMplsPseudowireTerminationPoint (class ID 333) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMplsPseudowireTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(mplspseudowireterminationpointBME, params...)
+}
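The uplink and downlink label attributes carry ordinary MPLS labels, which are 20-bit values; that bound comes from MPLS itself and is not enforced by this generated file. A quick range check one might apply before building a create request:

// validMplsLabel reports whether v fits in the 20-bit MPLS label field.
func validMplsLabel(v uint32) bool {
	return v <= 0xFFFFF
}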
diff --git a/vendor/github.com/cboling/omci/generated/multicastgeminterworkingterminationpoint.go b/vendor/github.com/cboling/omci/generated/multicastgeminterworkingterminationpoint.go
new file mode 100644
index 0000000..a855376
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/multicastgeminterworkingterminationpoint.go
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MulticastGemInterworkingTerminationPointClassId ClassID = ClassID(281)
+
+var multicastgeminterworkingterminationpointBME *ManagedEntityDefinition
+
+// MulticastGemInterworkingTerminationPoint (class ID #281)
+//	An instance of this ME represents a point in a G-PON ONU where a multicast service interworks
+//	with the GEM layer. At this point, a multicast bit stream is reconstructed from GEM packets.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Multicast interworking GEM modes of operation
+//
+//	The default multicast operation of the PON is where all the multicast content streams are
+//	carried in one PON layer connection (GEM port). This connection is then specified in the first
+//	entry of the IPv4 or IPv6 multicast address table, as the case may be. This single entry also
+//	specifies an all-inclusive IP multicast destination address (DA) range (e.g., 224.0.0.0 to
+//	239.255.255.255 in the case of IPv4). The ONU then filters the traffic based on either Ethernet
+//	MAC addresses or IP addresses. The associated GEM port network CTP ME specifies the GEM port-ID
+//	that supports all multicast connections.
+//
+//	In the default multicast operation, all multicast content streams are placed in one PON layer
+//	connection (GEM port). The OLT sets up a completely conventional model, a pointer from the
+//	multicast GEM IW termination to a GEM port network CTP. The OLT configures the GEM port-ID of
+//	the GEM port network CTP into the appropriate multicast address table attribute(s), along with
+//	the other table fields that specify the range of IP multicast DAs. The ONU accepts the entire
+//	multicast stream through the designated GEM port, then filters the traffic based on either the
+//	Ethernet MAC address or IP DA.
+//
+//	An optional multicast configuration supports separate multicast streams carried over separate
+//	PON layer connections, i.e., on separate GEM ports. This permits the ONU to filter multicast
+//	streams at the GEM level, which is efficient in hardware, while ignoring other multicast streams
+//	that may be of interest to other ONUs on the PON.
+//
+//	After configuring the explicit model for the first multicast GEM port, the OLT supports multiple
+//	multicast GEM ports by then configuring additional entries into the multicast address table(s),
+//	entries with different GEM port-IDs. The OMCI model is defined such that these ports are
+//	implicitly grouped together and served by the single explicit GEM port network CTP. No
+//	additional GEM network CTPs need be created or linked for the additional GEM ports.
+//
+//	Several multicast GEM IW TPs can exist, each linked to separate bridge ports or mappers to serve
+//	different communities of interest in a complex ONU.
+//
+//	Discovery of multicast support
+//
+//	The OLT uses the multicast GEM IW TP entity as the means to discover the ONU's multicast
+//	capability. This entity is mandatory if multicast is supported by the ONU. If the OLT attempts
+//	to create this entity on an ONU that does not support multicast, the create command fails. The
+//	create or set command also fails if the OLT attempts to exploit optional features that the ONU
+//	does not support, e.g., in attempting to write a multicast address table with more than a single
+//	entry or to create multiple multicast GEM IW TPs.
+//
+//	This ME is defined by similarity to the unicast GEM IW TP, and a number of its attributes are
+//	not meaningful in a multicast context. These attributes are set to 0 and not used, as indicated
+//	in the following.
+//
+//	Relationships
+//		An instance of this ME exists for each occurrence of transformation of GEM packets into a
+//		multicast data stream.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0xFFFF
+//			is reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Gem Port Network Ctp Connectivity Pointer
+//			GEM port network CTP connectivity pointer: This attribute points to an instance of the GEM port
+//			network CTP that is associated with this multicast GEM IW TP. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Interworking Option
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Service Profile Pointer
+//			Service profile pointer: This attribute is set to 0 and not used. For backward compatibility, it
+//			may also be set to point to a MAC bridge service profile or IEEE 802.1p mapper service profile.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Pptp Counter
+//			PPTP counter: This attribute represents the number of instances of PPTP MEs associated with this
+//			instance of the multicast GEM IW TP. This attribute conveys no information that is not available
+//			elsewhere; it may be set to 0xFF and not used. (R) (optional) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Gal Profile Pointer
+//			GAL profile pointer: This attribute is set to 0 and not used. For backward compatibility, it may
+//			also be set to point to a GAL Ethernet profile. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Ipv6 Multicast Address Table
+//			(R, W) (optional) (24N bytes, where N is the number of entries in the list.)
+//
+type MulticastGemInterworkingTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	multicastgeminterworkingterminationpointBME = &ManagedEntityDefinition{
+		Name:    "MulticastGemInterworkingTerminationPoint",
+		ClassID: 281,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("GemPortNetworkCtpConnectivityPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("InterworkingOption", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("ServiceProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: ByteField("PptpCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 5),
+			6: Uint16Field("GalProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: TableField("Ipv6MulticastAddressTable", TableInfo{nil, 24}, mapset.NewSetWith(Read, Write), false, true, false, 7),
+		},
+	}
+}
+
+// NewMulticastGemInterworkingTerminationPoint (class ID 281) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMulticastGemInterworkingTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(multicastgeminterworkingterminationpointBME, params...)
+}
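The default mode of operation described above uses a single address-table entry covering the entire IPv4 multicast range, 224.0.0.0 to 239.255.255.255. A self-contained helper mirroring that range check (illustrative only):

package main

import (
	"fmt"
	"net"
)

// inDefaultIPv4MulticastRange reports whether ip lies in 224.0.0.0-239.255.255.255.
func inDefaultIPv4MulticastRange(ip net.IP) bool {
	v4 := ip.To4()
	return v4 != nil && v4[0] >= 224 && v4[0] <= 239
}

func main() {
	fmt.Println(inDefaultIPv4MulticastRange(net.ParseIP("239.255.255.255"))) // true
	fmt.Println(inDefaultIPv4MulticastRange(net.ParseIP("192.0.2.1")))       // false
}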
diff --git a/vendor/github.com/cboling/omci/generated/multicastsubscriberconfiginfo.go b/vendor/github.com/cboling/omci/generated/multicastsubscriberconfiginfo.go
new file mode 100644
index 0000000..af2ea0d
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/multicastsubscriberconfiginfo.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MulticastSubscriberConfigInfoClassId ClassID = ClassID(310)
+
+var multicastsubscriberconfiginfoBME *ManagedEntityDefinition
+
+// MulticastSubscriberConfigInfo (class ID #310)
+//	This ME organizes data associated with multicast management at subscriber ports of IEEE 802.1
+//	bridges, including IEEE 802.1p mappers when the provisioning model is mapper-based rather than
+//	bridge-based. Instances of this ME are created and deleted by the OLT. Because of backward
+//	compatibility considerations, a subscriber port without an associated multicast subscriber
+//	config info ME would be expected to support unrestricted multicast access; this ME may therefore
+//	be viewed as restrictive, rather than permissive.
+//
+//	Through separate attributes, this ME supports either a single multicast operations profile in
+//	its backward compatible form, or a list of multicast operations profiles instead (the list may
+//	of course contain a single entry). The OLT can determine whether the ONU supports the multiple
+//	profile capability by performing a get operation on the optional multicast service package table
+//	attribute, which exists only on ONUs that are prepared to support the feature.
+//
+//	Relationships
+//		An instance of this ME is associated with one instance of the MAC bridge port configuration data
+//		or the IEEE 802.1p mapper service profile.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data or IEEE 802.1p mapper ME. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Me Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Multicast Operations Profile Pointer
+//			Multicast operations profile pointer: This attribute points to an instance of the multicast
+//			operations profile. This attribute is ignored by the ONU if a non-empty multicast service
+//			package table attribute is present. (R,W, set-by-create) (mandatory) (2 bytes)
+//
+//		Max Simultaneous Groups
+//			Max simultaneous groups: This attribute specifies the maximum number of dynamic multicast groups
+//			that may be replicated to the client port at any one time. The recommended default value 0
+//			specifies that no administrative limit is to be imposed. (R, W, setbycreate) (optional)
+//			(2 bytes)
+//
+//		Max Multicast Bandwidth
+//			Max multicast bandwidth: This attribute specifies the maximum imputed dynamic bandwidth, in
+//			bytes per second, that may be delivered to the client port at any one time. The recommended
+//			default value 0 specifies that no administrative limit is to be imposed. (R, W, setbycreate)
+//			(optional) (4 bytes)
+//
+//		Bandwidth Enforcement
+//			Bandwidth enforcement: The recommended default value of this Boolean attribute is false, and
+//			specifies that attempts to exceed the max multicast bandwidth be counted but honoured. The value
+//			true specifies that such attempts be counted and denied. The imputed bandwidth value is taken
+//			from the dynamic access control list table, both for a new join request and for pre-existing
+//			groups. (R, W, setbycreate) (optional) (1 byte)
+//
+//		Multicast Service Package Table
+//			(R, W) (optional) (20N bytes, where N is the number of entries in the table)
+//
+type MulticastSubscriberConfigInfo struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	multicastsubscriberconfiginfoBME = &ManagedEntityDefinition{
+		Name:    "MulticastSubscriberConfigInfo",
+		ClassID: 310,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("MeType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("MulticastOperationsProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("MaxSimultaneousGroups", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4: Uint32Field("MaxMulticastBandwidth", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5: ByteField("BandwidthEnforcement", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6: TableField("MulticastServicePackageTable", TableInfo{nil, 22}, mapset.NewSetWith(Read, Write), false, true, false, 6),
+		},
+	}
+}
+
+// NewMulticastSubscriberConfigInfo (class ID 310) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMulticastSubscriberConfigInfo(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(multicastsubscriberconfiginfoBME, params...)
+}
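The max multicast bandwidth and bandwidth enforcement attributes interact as described: joins that would exceed the limit are always counted, and are denied only when enforcement is true, with a limit of 0 meaning no administrative cap. A sketch of that decision under those stated semantics (the bandwidth-accounting model here is an assumption for illustration):

// admitJoin applies the semantics described for the bandwidth enforcement
// attribute: an exceeding join is counted either way and denied only if enforce is true.
func admitJoin(currentBps, imputedBps, maxBps uint32, enforce bool) (admit, countExceeded bool) {
	if maxBps == 0 || currentBps+imputedBps <= maxBps {
		return true, false
	}
	return !enforce, true
}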
diff --git a/vendor/github.com/cboling/omci/generated/multicastsubscribermonitor.go b/vendor/github.com/cboling/omci/generated/multicastsubscribermonitor.go
new file mode 100644
index 0000000..245cb24
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/multicastsubscribermonitor.go
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const MulticastSubscriberMonitorClassId ClassID = ClassID(311)
+
+var multicastsubscribermonitorBME *ManagedEntityDefinition
+
+// MulticastSubscriberMonitor (class ID #311)
+//	This ME provides the current status of each port with respect to its multicast subscriptions. It
+//	may be useful for status monitoring or debugging purposes. The status table includes all dynamic
+//	groups currently subscribed by the port.
+//
+//	Relationships
+//		Instances of this ME are created and deleted at the request of the OLT. One instance may exist
+//		for each IEEE 802.1 UNI configured to support multicast subscription.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data or IEEE 802.1p mapper ME. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Me Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Current Multicast Bandwidth
+//			Current multicast bandwidth: This attribute is the ONU's (BE) estimate of the actual bandwidth
+//			currently being delivered to this particular MAC bridge port over all dynamic multicast groups.
+//			(R) (optional) (4 bytes)
+//
+//		Join Messages Counter
+//			Join messages counter: This attribute counts the number of times the corresponding subscriber
+//			sent a join message that was accepted. When full, the counter rolls over to 0. (R) (optional)
+//			(4 bytes)
+//
+//		Bandwidth Exceeded Counter
+//			Bandwidth exceeded counter: This attribute counts the number of join messages that did exceed,
+//			or would have exceeded, the max multicast bandwidth, whether accepted or denied. When full, the
+//			counter rolls over to 0. (R) (optional) (4 bytes)
+//
+//		Ipv4 Active Group List Table
+//			(R) (mandatory) (24N bytes)
+//
+//		Ipv6 Active Group List Table
+//			(R) (optional) (58N bytes)
+//
+type MulticastSubscriberMonitor struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	multicastsubscribermonitorBME = &ManagedEntityDefinition{
+		Name:    "MulticastSubscriberMonitor",
+		ClassID: 311,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("MeType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint32Field("CurrentMulticastBandwidth", 0, mapset.NewSetWith(Read), false, false, true, false, 2),
+			3: Uint32Field("JoinMessagesCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4: Uint32Field("BandwidthExceededCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: TableField("Ipv4ActiveGroupListTable", TableInfo{nil, 24}, mapset.NewSetWith(Read), false, false, false, 5),
+			6: TableField("Ipv6ActiveGroupListTable", TableInfo{nil, 58}, mapset.NewSetWith(Read), false, true, false, 6),
+		},
+	}
+}
+
+// NewMulticastSubscriberMonitor (class ID 311) creates the basic
+// Managed Entity definition that is used to validate an ME of this type
+// that is received from or about to be sent on the wire.
+func NewMulticastSubscriberMonitor(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(multicastsubscribermonitorBME, params...)
+}
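The join and bandwidth-exceeded counters are 4-byte values that roll over to 0 when full, so the increase between two polls is handled naturally by unsigned 32-bit arithmetic:

// counterDelta returns the increase between two reads of a 32-bit counter
// that wraps to zero; unsigned subtraction is correct across a single wrap.
func counterDelta(prev, curr uint32) uint32 {
	return curr - prev
}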
diff --git a/vendor/github.com/cboling/omci/generated/networkaddress.go b/vendor/github.com/cboling/omci/generated/networkaddress.go
new file mode 100644
index 0000000..38eff06
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/networkaddress.go
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const NetworkAddressClassId ClassID = ClassID(137)
+
+var networkaddressBME *ManagedEntityDefinition
+
+// NetworkAddress (class ID #137)
+//	The network address ME associates a network address with security methods required to access a
+//	server. It is conditionally required for ONUs that support VoIP services. The address may take
+//	the form of a URL, a fully qualified path or an IP address represented as an ASCII string.
+//
+//	If a non-OMCI interface is used to manage VoIP signalling, this ME is unnecessary.
+//
+//	Instances of this ME are created and deleted by the OLT or the ONU, depending on the method used
+//	and case.
+//
+//	Relationships
+//		Any ME that requires a network address may link to an instance of this ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Instances of
+//			this ME created autonomously by the ONU have IDs in the range 0..0x7FFF. Instances created by
+//			the OLT have IDs in the range 0x8000..0xFFFE. The value 0xFFFF is reserved. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Security Pointer
+//			Security pointer: This attribute points to an authentication security method ME. The
+//			authentication security method indicates the username and password to be used when retrieving
+//			the network address indicated by this ME. A null pointer indicates that security attributes are
+//			not defined for this network address. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Address Pointer
+//			Address pointer: This attribute points to the large string ME that contains the network address.
+//			It may contain a fully qualified domain name, URI or IP address. The URI may also contain a port
+//			identifier (e.g., "x.y.z.com:5060"). A null pointer indicates that no network address is
+//			defined. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+type NetworkAddress struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	networkaddressBME = &ManagedEntityDefinition{
+		Name:    "NetworkAddress",
+		ClassID: 137,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("SecurityPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("AddressPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+		},
+	}
+}
+
+// NewNetworkAddress (class ID 137) creates the basic Managed Entity definition
+// that is used to validate an ME of this type that is received from or
+// about to be sent on the wire.
+func NewNetworkAddress(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(networkaddressBME, params...)
+}
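The large string referenced by the address pointer may carry an optional port, as in the "x.y.z.com:5060" example above; the standard library splits such a value directly (the literal below merely stands in for whatever the linked large string ME holds):

	host, port, err := net.SplitHostPort("x.y.z.com:5060")
	if err == nil {
		fmt.Println(host, port) // x.y.z.com 5060
	}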
diff --git a/vendor/github.com/cboling/omci/generated/networkdialplantable.go b/vendor/github.com/cboling/omci/generated/networkdialplantable.go
new file mode 100644
index 0000000..75e6bde
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/networkdialplantable.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const NetworkDialPlanTableClassId ClassID = ClassID(145)
+
+var networkdialplantableBME *ManagedEntityDefinition
+
+// NetworkDialPlanTable (class ID #145)
+//	The network dial plan table ME is optional for ONUs providing VoIP services. This ME is used to
+//	provision dial plans from the OLT. Instances of this ME are created and deleted by the OLT. If a
+//	non-OMCI interface is used to manage SIP for VoIP, this ME is unnecessary.
+//
+//	Relationships
+//		An instance of this ME may be associated with one or more instances of the SIP user data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Dial Plan Number
+//			Dial plan number: This attribute indicates the current number of dial plans in the dial plan
+//			table. (R) (mandatory) (2 bytes)
+//
+//		Dial Plan Table Max Size
+//			Dial plan table max size: This attribute defines the maximum number of dial plans that can be
+//			stored in the dial plan table. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Critical Dial Timeout
+//			Critical dial timeout: This attribute defines the critical dial timeout for digit map
+//			processing, in milliseconds. The recommended default value is 4000 ms. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Partial Dial Timeout
+//			Partial dial timeout: This attribute defines the partial dial timeout for digit map processing,
+//			in milliseconds. The recommended default value is 16000 ms. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Dial Plan Format
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Dial Plan Table
+//			(R, W) (mandatory) (30 * N bytes, where N is the number of dial plans)
+//
+type NetworkDialPlanTable struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	networkdialplantableBME = &ManagedEntityDefinition{
+		Name:    "NetworkDialPlanTable",
+		ClassID: 145,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetNext,
+			Set,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("DialPlanNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("DialPlanTableMaxSize", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 2),
+			3: Uint16Field("CriticalDialTimeout", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("PartialDialTimeout", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: ByteField("DialPlanFormat", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: TableField("DialPlanTable", TableInfo{0, 1}, mapset.NewSetWith(Read, Write), false, false, false, 6),
+		},
+	}
+}
+
+// NewNetworkDialPlanTable (class ID 145) creates the basic Managed Entity
+// definition that is used to validate an ME of this type that is received
+// from or about to be sent on the wire.
+func NewNetworkDialPlanTable(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(networkdialplantableBME, params...)
+}
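The recommended dial-timeout defaults quoted above, expressed as durations for clarity; the constant names are illustrative and not part of this package:

const (
	defaultCriticalDialTimeout = 4000 * time.Millisecond  // recommended default for the critical dial timeout
	defaultPartialDialTimeout  = 16000 * time.Millisecond // recommended default for the partial dial timeout
)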
diff --git a/vendor/github.com/cboling/omci/generated/olt-g.go b/vendor/github.com/cboling/omci/generated/olt-g.go
new file mode 100644
index 0000000..d02bf93
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/olt-g.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OltGClassId ClassID = ClassID(131)
+
+var oltgBME *ManagedEntityDefinition
+
+// OltG (class ID #131)
+//	This optional ME identifies the OLT to which an ONU is connected. This ME provides a way for the
+//	ONU to configure itself for operability with a particular OLT. It also provides a way for the
+//	OLT to communicate the time of day to the ONU.
+//
+//	An ONU that supports this ME automatically creates an instance of it. Immediately following the
+//	start-up phase, the OLT should set the ONU to the desired configuration. Interpretation of the
+//	OLT vendor ID, equipment ID and version attributes is a matter for negotiation between the two
+//	vendors involved.
+//
+//	Relationships
+//		The single instance of this ME is associated with the ONU ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Olt Vendor Id
+//			OLT vendor ID: This attribute identifies the OLT vendor. It is the same as the four most
+//			significant bytes of an ONU serial number specified in the respective TC layer specification.
+//			Upon instantiation, this attribute comprises all spaces. (R, W) (mandatory) (4 bytes)
+//
+//		Equipment Id
+//			Equipment ID: This attribute may be used to identify the specific type of OLT. The default value
+//			of all spaces indicates that equipment ID information is not available or applicable to the OLT
+//			being represented. (R, W) (mandatory) (20 bytes)
+//
+//		Version
+//			Version:	This attribute identifies the version of the OLT as defined by the vendor. The default
+//			left-justified ASCII string "0" (padded with trailing nulls) indicates that version information
+//			is not available or applicable to the OLT being represented. (R, W) (mandatory) (14 bytes)
+//
+//		Time Of Day Information
+//			NOTE – In ITU-T G.987/ITU-T G.989 systems, the superframe count field of the time of day
+//			information attribute contains the 32 LSBs of the actual counter.
+//
+type OltG struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	oltgBME = &ManagedEntityDefinition{
+		Name:    "OltG",
+		ClassID: 131,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint32Field("OltVendorId", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: MultiByteField("EquipmentId", 20, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: MultiByteField("Version", 14, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: MultiByteField("TimeOfDayInformation", 14, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+		},
+	}
+}
+
+// NewOltG (class ID 131) creates the basic Managed Entity definition
+// that is used to validate an ME of this type that is received from or
+// about to be sent on the wire.
+func NewOltG(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(oltgBME, params...)
+}
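The defaults quoted above (an OLT vendor ID of four spaces, and a version of ASCII "0" padded with trailing nulls to 14 bytes) look like this as raw byte values, for illustration only:

	defaultOltVendorID := []byte("    ")                       // four ASCII spaces
	defaultVersion := append([]byte{'0'}, make([]byte, 13)...) // '0' plus 13 trailing NULs = 14 bytes
	_, _ = defaultOltVendorID, defaultVersion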
diff --git a/vendor/github.com/cboling/omci/generated/omci.go b/vendor/github.com/cboling/omci/generated/omci.go
new file mode 100644
index 0000000..f6df0c8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/omci.go
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OmciClassId ClassID = ClassID(287)
+
+var omciBME *ManagedEntityDefinition
+
+// Omci (class ID #287)
+//	This ME describes the ONU's general level of support for OMCI MEs and messages. This ME is not
+//	included in an MIB upload.
+//
+//	Relationships
+//		One instance exists in the ONU. The ME entities are related to the OMCI entity.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Me Type Table
+//			ME type table: This attribute lists the ME classes supported by the ONU. Each entry contains the
+//			ME class value (see Table 11.2.4-1) of an ME type. (R) (mandatory) (2 * N bytes, where N is the
+//			number of entries in the list.)
+//
+//		Message Type Table
+//			Message type table: This attribute is a list of message types (MTs) supported by the ONU. Each
+//			entry contains the MT of an OMCI message (see Table 11.2.2-1). (R) (mandatory) (M bytes, where M
+//			is the number of entries in the list.)
+//
+type Omci struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	omciBME = &ManagedEntityDefinition{
+		Name:    "Omci",
+		ClassID: 287,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetNext,
+		),
+		AllowedAttributeMask: 0XC000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: TableField("MeTypeTable", TableInfo{0, 2}, mapset.NewSetWith(Read), false, false, false, 1),
+			2: TableField("MessageTypeTable", TableInfo{0, 1}, mapset.NewSetWith(Read), false, false, false, 2),
+		},
+	}
+}
+
+// NewOmci (class ID 287) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOmci(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(omciBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/omcidefs.go b/vendor/github.com/cboling/omci/generated/omcidefs.go
new file mode 100644
index 0000000..9dbc5ee
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/omcidefs.go
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import (
+	"fmt"
+	"github.com/deckarep/golang-set"
+	"github.com/google/gopacket"
+)
+
+// MsgType represents an OMCI message type
+type MsgType byte
+
+// Results represents the status field in an OMCI response frame
+type Results byte
+
+// AttributeAccess represents the access allowed to an attribute. Some MEs
+// are instantiated by the ONU autonomously. Others are instantiated on
+// explicit request of the OLT via a Create command, and a few ME types may
+// be instantiated either way, depending on the ONU architecture or
+// circumstances.
+//
+// Attributes of an ME that is auto-instantiated by the ONU can be read (R),
+// write (W), or read/write (R, W). On the other hand, attributes of an ME
+// that is instantiated by the OLT can be (R), (W), (R, W),
+// (R, set-by-create) or (R, W, set-by-create).
+type AttributeAccess byte
+
+// ClassID is a 16-bit value that uniquely defines a Managed Entity class
+// from the ITU-T G.988 specification.
+type ClassID uint16
+
+func (cid ClassID) String() string {
+	if entity, err := LoadManagedEntityDefinition(cid); err == nil {
+		return fmt.Sprintf("[%s] (%d/%#x)",
+			entity.GetManagedEntityDefinition().GetName(), uint16(cid), uint16(cid))
+	}
+	return "unknown ClassID"
+}
+
+const (
+	// AK (Bit 6) indicates whether this message is an AK to an action request.
+	// If a message is an AK, this bit is set to 1. If the message is not a
+	// response to a command, this bit is set to 0. In messages sent by the OLT,
+	// this bit is always 0.
+	AK byte = 0x20
+
+	// AR (Bit 7), acknowledge request, indicates whether the message requires an
+	// AK. An AK is a response to an action request, not a link layer handshake.
+	// If an AK is expected, this bit is set to 1. If no AK is expected, this bit
+	// is 0. In messages sent by the ONU, this bit is always 0.
+	AR byte = 0x40
+
+	// MsgTypeMask provides a mask to get the base message type
+	MsgTypeMask = 0x1F
+)
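+
+// decodeMsgType is an editorial sketch, not part of the generated API. It
+// shows how the mask and flag bits above are typically applied to the
+// message-type byte of an OMCI frame: the low five bits carry the base
+// message type, while AR and AK are the request/acknowledgement flags.
+func decodeMsgType(b byte) (mt MsgType, arSet bool, akSet bool) {
+	return MsgType(b & MsgTypeMask), b&AR != 0, b&AK != 0
+}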
+
+const (
+	// Message Types
+	Create                MsgType = 4
+	Delete                MsgType = 6
+	Set                   MsgType = 8
+	Get                   MsgType = 9
+	GetAllAlarms          MsgType = 11
+	GetAllAlarmsNext      MsgType = 12
+	MibUpload             MsgType = 13
+	MibUploadNext         MsgType = 14
+	MibReset              MsgType = 15
+	AlarmNotification     MsgType = 16
+	AttributeValueChange  MsgType = 17
+	Test                  MsgType = 18
+	StartSoftwareDownload MsgType = 19
+	DownloadSection       MsgType = 20
+	EndSoftwareDownload   MsgType = 21
+	ActivateSoftware      MsgType = 22
+	CommitSoftware        MsgType = 23
+	SynchronizeTime       MsgType = 24
+	Reboot                MsgType = 25
+	GetNext               MsgType = 26
+	TestResult            MsgType = 27
+	GetCurrentData        MsgType = 28
+	SetTable              MsgType = 29 // Defined in Extended Message Set Only
+)
+
+func (mt MsgType) String() string {
+	switch mt {
+	default:
+		return "Unknown"
+	case Create:
+		return "Create"
+	case Delete:
+		return "Delete"
+	case Set:
+		return "Set"
+	case Get:
+		return "Get"
+	case GetAllAlarms:
+		return "Get All Alarms"
+	case GetAllAlarmsNext:
+		return "Get All Alarms Next"
+	case MibUpload:
+		return "MIB Upload"
+	case MibUploadNext:
+		return "MIB Upload Next"
+	case MibReset:
+		return "MIB Reset"
+	case AlarmNotification:
+		return "Alarm Notification"
+	case AttributeValueChange:
+		return "Attribute Value Change"
+	case Test:
+		return "Test"
+	case StartSoftwareDownload:
+		return "Start Software Download"
+	case DownloadSection:
+		return "Download Section"
+	case EndSoftwareDownload:
+		return "End Software Download"
+	case ActivateSoftware:
+		return "Activate Software"
+	case CommitSoftware:
+		return "Commit Software"
+	case SynchronizeTime:
+		return "Synchronize Time"
+	case Reboot:
+		return "Reboot"
+	case GetNext:
+		return "Get Next"
+	case TestResult:
+		return "Test Result"
+	case GetCurrentData:
+		return "Get Current Data"
+	case SetTable:
+		return "Set Table"
+	}
+}
+
+var allNotificationTypes = [...]MsgType{
+	AlarmNotification,
+	AttributeValueChange,
+	TestResult,
+}
+
+// SupportsMsgType returns true if the managed entity supports the desired
+// Message Type / action
+func SupportsMsgType(entity IManagedEntityDefinition, msgType MsgType) bool {
+	return entity.GetMessageTypes().Contains(msgType)
+}
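+
+// supportsSet is an editorial sketch, not part of the generated API. It looks
+// up an ME definition by class ID, mirroring the lookup used by ClassID.String
+// above, and asks whether that ME supports the Set action. Error handling is
+// intentionally minimal.
+func supportsSet(cid ClassID) bool {
+	entity, err := LoadManagedEntityDefinition(cid)
+	if err != nil {
+		return false
+	}
+	return SupportsMsgType(entity.GetManagedEntityDefinition(), Set)
+}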
+
+// IsAutonomousNotification returns true if the message type is one of the
+// autonomous notifications that an ONU may send without a preceding request
+// from the OLT.
+func IsAutonomousNotification(mt MsgType) bool {
+	for _, m := range allNotificationTypes {
+		if mt == m {
+			return true
+		}
+	}
+	return false
+}
+
+const (
+	// Response status codes
+	Success          Results = 0 // command processed successfully
+	ProcessingError  Results = 1 // command processing error
+	NotSupported     Results = 2 // command not supported
+	ParameterError   Results = 3 // parameter error
+	UnknownEntity    Results = 4 // unknown managed entity
+	UnknownInstance  Results = 5 // unknown managed entity instance
+	DeviceBusy       Results = 6 // device busy
+	InstanceExists   Results = 7 // instance exists
+	AttributeFailure Results = 9 // Attribute(s) failed or unknown
+)
+
+func (rc Results) String() string {
+	switch rc {
+	default:
+		return "Unknown"
+	case Success:
+		return "Success"
+	case ProcessingError:
+		return "Processing Error"
+	case NotSupported:
+		return "Not Supported"
+	case ParameterError:
+		return "Parameter Error"
+	case UnknownEntity:
+		return "Unknown Entity"
+	case UnknownInstance:
+		return "Unknown Instance"
+	case DeviceBusy:
+		return "Device Busy"
+	case InstanceExists:
+		return "Instance Exists"
+	case AttributeFailure:
+		return "Attribute Failure"
+	}
+}
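+
+// exampleResult is an editorial sketch, not part of the generated API: the
+// result byte carried in an OMCI response converts directly to a Results
+// value, and its String method gives the readable description used above.
+func exampleResult(b byte) string {
+	return Results(b).String()
+}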
+
+const (
+	// Access allowed on a Managed Entity attribute
+	Read AttributeAccess = 1 << iota
+	Write
+	SetByCreate
+)
+
+func (access AttributeAccess) String() string {
+	switch access {
+	default:
+		return "Unknown"
+	case Read:
+		return "Read"
+	case Write:
+		return "Write"
+	case SetByCreate:
+		return "SetByCreate"
+	case Read | Write:
+		return "Read/Write"
+	case Read | SetByCreate:
+		return "Read/SetByCreate"
+	case Write | SetByCreate:
+		return "Write/SetByCreate"
+	case Read | Write | SetByCreate:
+		return "Read/Write/SetByCreate"
+	}
+}
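+
+// exampleAccess is an editorial sketch, not part of the generated API: the
+// access constants above are bit flags, so combined permissions are built by
+// OR-ing them together and are rendered by the String method above.
+func exampleAccess() string {
+	return (Read | Write | SetByCreate).String() // "Read/Write/SetByCreate"
+}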
+
+// SupportsAttributeAccess returns true if the managed entity attribute
+// supports the desired access
+func SupportsAttributeAccess(attr *AttributeDefinition, acc AttributeAccess) bool {
+	return attr.GetAccess().Contains(acc)
+}
+
+type IManagedEntityDefinition interface {
+	GetName() string
+	GetClassID() ClassID
+	GetMessageTypes() mapset.Set
+	GetAllowedAttributeMask() uint16
+	GetAttributeDefinitions() *AttributeDefinitionMap
+
+	DecodeAttributes(uint16, []byte, gopacket.PacketBuilder, byte) (AttributeValueMap, error)
+	SerializeAttributes(AttributeValueMap, uint16, gopacket.SerializeBuffer, byte, int) error
+}
+
+type IManagedEntity interface {
+	IManagedEntityDefinition
+	GetManagedEntityDefinition() IManagedEntityDefinition
+
+	GetEntityID() uint16
+	SetEntityID(uint16) error
+
+	GetAttributeMask() uint16
+
+	GetAttributeValueMap() *AttributeValueMap
+	GetAttribute(string) (interface{}, error)
+	GetAttributeByIndex(uint) (interface{}, error)
+
+	SetAttribute(string, interface{}) error
+	SetAttributeByIndex(uint, interface{}) error
+
+	DeleteAttribute(string) error
+	DeleteAttributeByIndex(uint) error
+}
diff --git a/vendor/github.com/cboling/omci/generated/omcierror.go b/vendor/github.com/cboling/omci/generated/omcierror.go
new file mode 100644
index 0000000..8bf369e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/omcierror.go
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import (
+	"errors"
+	"fmt"
+)
+
+// OmciErrors is the interface implemented by the custom Go errors defined
+// below for common OMCI failures. It carries the OMCI response status code
+// alongside the error message.
+type OmciErrors interface {
+	Error() string
+	StatusCode() Results
+	GetError() error
+}
+
+type OmciError struct {
+	err        string
+	statusCode Results
+}
+
+func (e *OmciError) GetError() error {
+	return errors.New(e.err)
+}
+
+func (e *OmciError) Error() string {
+	return e.err
+}
+
+func (e *OmciError) StatusCode() Results {
+	return e.statusCode
+}
+
+// NewOmciError returns an OmciError carrying the given text and response
+// status code. It panics if called with the Success code.
+func NewOmciError(text string, status Results) OmciErrors {
+	if status == Success {
+		panic("Do not use OmciError to convey successful results")
+	}
+	return &OmciError{
+		err:        text,
+		statusCode: status,
+	}
+}
+
+type OmciNonStatusError struct {
+	OmciError
+}
+
+// NewNonStatusError is for processing errors that do not involve
+// frame processing status & results
+func NewNonStatusError(args ...interface{}) OmciErrors {
+	defaultValue := "command processing error"
+	return &OmciNonStatusError{
+		OmciError: OmciError{
+			err: genMessage(defaultValue, args...),
+		},
+	}
+}
+
+type OmciProcessingError struct {
+	OmciError
+}
+
+// NewProcessingError means the command processing failed at the ONU
+// for reasons not described by one of the more specific error codes.
+func NewProcessingError(args ...interface{}) OmciErrors {
+	defaultValue := "command processing error"
+	return &OmciProcessingError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: ProcessingError,
+		},
+	}
+}
+
+type NotSupportedError struct {
+	OmciError
+}
+
+// NewNotSupportedError means that the message type indicated in byte 3 is
+// not supported by the ONU.
+func NewNotSupportedError(args ...interface{}) OmciErrors {
+	defaultValue := "command not supported"
+	return &NotSupportedError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: NotSupported,
+		},
+	}
+}
+
+type ParamError struct {
+	OmciError
+	FailureMask uint16
+}
+
+// NewParameterError means that the command message received by the
+// ONU was errored. It would be appropriate if an attribute mask
+// were out of range, for example. In practice, this result code is
+// frequently used interchangeably with code 1001. However, the
+// optional attribute and attribute execution masks in the reply
+// messages are only defined for code 1001.
+func NewParameterError(mask uint16, args ...interface{}) OmciErrors {
+	defaultValue := "parameter error"
+	return &ParamError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: ParameterError,
+		},
+		FailureMask: mask,
+	}
+}
+
+type UnknownEntityError struct {
+	OmciError
+}
+
+// NewUnknownEntityError This result means that the managed entity class
+// (bytes 5..6) is not supported by the ONU.
+func NewUnknownEntityError(args ...interface{}) OmciErrors {
+	defaultValue := "unknown managed entity"
+	return &UnknownEntityError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: UnknownEntity,
+		},
+	}
+}
+
+type UnknownInstanceError struct {
+	OmciError
+}
+
+// NewUnknownInstanceError means that the managed entity instance (bytes 7..8)
+// does not exist in the ONU.
+func NewUnknownInstanceError(args ...interface{}) OmciErrors {
+	defaultValue := "unknown managed entity instance"
+	return &UnknownInstanceError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: UnknownInstance,
+		},
+	}
+}
+
+type DeviceBusyError struct {
+	OmciError
+}
+
+// NewDeviceBusyError means that the command could not be processed due
+// to process-related congestion at the ONU. This result code may
+// also be used as a pause indication to the OLT while the ONU
+// conducts a time-consuming operation such as storage of a
+// software image into non-volatile memory.
+func NewDeviceBusyError(args ...interface{}) OmciErrors {
+	defaultValue := "device busy"
+	return &DeviceBusyError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: DeviceBusy,
+		},
+	}
+}
+
+type InstanceExistsError struct {
+	OmciError
+}
+
+// NewInstanceExistsError means that the ONU already has a managed entity
+// instance that corresponds to the one the OLT is attempting to create.
+func NewInstanceExistsError(args ...interface{}) OmciErrors {
+	defaultValue := "instance exists"
+	return &InstanceExistsError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: InstanceExists,
+		},
+	}
+}
+
+type AttributeFailureError struct {
+	OmciError
+}
+
+// NewAttributeFailureError means that one or more of the requested attributes
+// failed or are unknown to the ONU (see the AttributeFailure result code).
+func NewAttributeFailureError(args ...interface{}) OmciErrors {
+	defaultValue := "attribute(s) failed or unknown"
+	return &AttributeFailureError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: AttributeFailure,
+		},
+	}
+}
+
+type MessageTruncatedError struct {
+	OmciError
+}
+
+// NewMessageTruncatedError means that the frame ran out of space before the
+// attribute could be fitted into the message being serialized.
+func NewMessageTruncatedError(args ...interface{}) OmciErrors {
+	defaultValue := "out-of-space. Cannot fit attribute into message"
+	return &MessageTruncatedError{
+		OmciError: OmciError{
+			err:        genMessage(defaultValue, args...),
+			statusCode: ProcessingError,
+		},
+	}
+}
+
+func genMessage(defaultValue string, args ...interface{}) string {
+	switch len(args) {
+	case 0:
+		return defaultValue
+
+	case 1:
+		switch first := args[0].(type) {
+		case string:
+			// Assume a simple, pre-formatted string
+			return args[0].(string)
+
+		case func() string:
+			// Assume a closure with no other arguments used
+			return first()
+
+		default:
+			panic("Unsupported parameter type")
+		}
+	}
+	return fmt.Sprintf(args[0].(string), args[1:]...)
+}
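+
+// exampleError is an editorial sketch, not part of the generated API. It
+// illustrates the calling convention genMessage gives every constructor above:
+// pass nothing for the default text, a single pre-formatted string or closure,
+// or a format string followed by its arguments. The attribute name used here
+// is purely illustrative.
+func exampleError() string {
+	e := NewProcessingError("failed to decode the %s attribute", "Version")
+	return fmt.Sprintf("%v: %s", e.StatusCode(), e.Error())
+}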
diff --git a/vendor/github.com/cboling/omci/generated/onu-g.go b/vendor/github.com/cboling/omci/generated/onu-g.go
new file mode 100644
index 0000000..32de3ac
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/onu-g.go
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OnuGClassId ClassID = ClassID(256)
+
+var onugBME *ManagedEntityDefinition
+
+// OnuG (class ID #256)
+//	This ME represents the ONU as equipment. The ONU automatically creates an instance of this ME.
+//	It assigns values to read-only attributes according to data within the ONU itself.
+//
+//	This ME has evolved from the ONT-G of [ITUT G.984.4].
+//
+//	Relationships
+//		In ITU-T GTC based PON applications, all other MEs in this Recommendation are related directly
+//		or indirectly to the ONU-G entity.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Vendor Id
+//			Vendor ID:	This attribute identifies the vendor of the ONU. It is the same as the four most
+//			significant bytes of the ONU serial number as specified in the respective transmission
+//			convergence (TC) layer specification. (R) (mandatory) (4 bytes)
+//
+//		Version
+//			Version:	This attribute identifies the version of the ONU as defined by the vendor. The
+//			character value 0 indicates that version information is not available or applicable. (R)
+//			(mandatory) (14 bytes)
+//
+//		Serial Number
+//			Serial number: The serial number is unique for each ONU. It is defined in the respective TC
+//			layer specification and contains the vendor ID and version number. The first four bytes are an
+//			ASCII-encoded four-letter vendor ID. The second four bytes are a binary encoded serial number,
+//			under the control of the ONU vendor. (R) (mandatory) (8 bytes)
+//
+//		Traffic Management Option
+//			Upon ME instantiation, the ONU sets this attribute to the value that describes its
+//			implementation. The OLT must adapt its model to conform to the ONU's selection. (R) (mandatory)
+//			(1 byte)
+//
+//		Deprecated
+//			Deprecated:	This attribute is not used. If it is present, it should be set to 0. (R) (optional)
+//			(1 byte)
+//
+//		Battery Backup
+//			Battery backup: This Boolean attribute controls whether the ONU performs backup battery
+//			monitoring (assuming it is capable of doing so). False disables battery alarm monitoring; true
+//			enables battery alarm monitoring. (R, W) (mandatory) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by the
+//			ONU as an entirety. Administrative state is further described in clause A.1.6. (R, W)
+//			(mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute reports whether the ME is currently capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Onu Survival Time
+//			ONU survival time: This attribute indicates the minimum guaranteed time in milliseconds between
+//			the loss of external power and the silence of the ONU. This does not include survival time
+//			attributable to a backup battery. The value zero implies that the actual time is not known. (R)
+//			(optional) (1 byte)
+//
+//		Logical Onu Id
+//			Logical ONU ID: This attribute provides a way for the ONU to identify itself. It is a text
+//			string, null terminated if it is shorter than 24 bytes, with a null default value. The mechanism
+//			for creation or modification of this information is beyond the scope of this Recommendation, but
+//			might include, for example, a web page displayed to a user. (R) (optional) (24 bytes)
+//
+//		Logical Password
+//			Logical password: This attribute provides a way for the ONU to submit authentication
+//			credentials. It is a text string, null terminated if it is shorter than 12 bytes, with a null
+//			default value. The mechanism for creation or modification of this information is beyond the
+//			scope of this Recommendation. (R) (optional) (12 bytes)
+//
+//		Credentials Status
+//			Other values are reserved.
+//
+//		Extended Tc_Layer Options
+//			(R) (optional) (2 bytes)
+//
+type OnuG struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	onugBME = &ManagedEntityDefinition{
+		Name:    "OnuG",
+		ClassID: 256,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Reboot,
+			Set,
+			SynchronizeTime,
+			Test,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  Uint32Field("VendorId", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  MultiByteField("Version", 14, nil, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint64Field("SerialNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  ByteField("TrafficManagementOption", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  ByteField("Deprecated", 0, mapset.NewSetWith(Read), false, false, true, true, 5),
+			6:  ByteField("BatteryBackup", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  ByteField("OnuSurvivalTime", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: MultiByteField("LogicalOnuId", 24, nil, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: MultiByteField("LogicalPassword", 12, nil, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: ByteField("CredentialsStatus", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("ExtendedTcLayerOptions", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+		},
+	}
+}
+
+// NewOnuG (class ID 256) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOnuG(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(onugBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/onu2-g.go b/vendor/github.com/cboling/omci/generated/onu2-g.go
new file mode 100644
index 0000000..ebad3be
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/onu2-g.go
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Onu2GClassId ClassID = ClassID(257)
+
+var onu2gBME *ManagedEntityDefinition
+
+// Onu2G (class ID #257)
+//	This ME contains additional attributes associated with a PON ONU. The ONU automatically creates
+//	an instance of this ME. Its attributes are populated according to data within the ONU itself.
+//
+//	This ME is the same as the ONT2-G of [ITUT G.984.4], with extensions.
+//
+//	Relationships
+//		This ME is paired with the ONU-G entity.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Equipment Id
+//			Equipment ID: This attribute may be used to identify the specific type of ONU. In some
+//			environments, this attribute may include the common language equipment identification (CLEI)
+//			code. (R) (optional) (20 bytes)
+//
+//		Optical Network Unit Management And Control Channel Omcc Version
+//			(R) (mandatory) (1 byte)
+//
+//		Vendor Product Code
+//			Vendor product code: This attribute contains a vendor-specific product code for the ONU. (R)
+//			(optional) (2 bytes)
+//
+//		Security Capability
+//			(R) (mandatory) (1 byte)
+//
+//		Security Mode
+//			Upon ME instantiation, the ONU sets this attribute to 1, AES-128. Attribute value 1 does not
+//			imply that any channels are encrypted; that process is negotiated at the PLOAM layer. It only
+//			signifies that the advanced encryption standard (AES) with 128 bit keys is the security mode to
+//			be used on any channels that the OLT may choose to encrypt. (R, W) (mandatory) (1 byte)
+//
+//		Total Priority Queue Number
+//			Total priority queue number: This attribute reports the total number of upstream priority queues
+//			that are not associated with a circuit pack, but with the ONU in its entirety. Upon ME
+//			instantiation, the ONU sets this attribute to the value that represents its capabilities. (R)
+//			(mandatory) (2 bytes)
+//
+//		Total Traffic Scheduler Number
+//			Total traffic scheduler number: This attribute reports the total number of traffic schedulers
+//			that are not associated with a circuit pack, but with the ONU in its entirety. The ONU supports
+//			null function, strict priority scheduling and weighted round robin (WRR) from the priority
+//			control and guarantee of minimum rate control points of view, respectively. If the ONU has no
+//			global traffic schedulers, this attribute is 0. (R) (mandatory) (1 byte)
+//
+//		Deprecated
+//			Deprecated:	This attribute should always be set to 1 by the ONU and ignored by the OLT. (R)
+//			(mandatory) (1 byte)
+//
+//		Total Gem Port_Id Number
+//			Total GEM port-ID number: This attribute reports the total number of GEM port-IDs supported by
+//			the ONU. The maximum value is specified in the corresponding TC recommendations. Upon ME
+//			instantiation, the ONU sets this attribute to the value that represents its capabilities. (R)
+//			(optional) (2 bytes)
+//
+//		Sysuptime
+//			SysUpTime:	This attribute counts 10 ms intervals since the ONU was last initialized. It rolls
+//			over to 0 when full (see [IETF RFC 1213]). (R) (optional) (4 bytes)
+//
+//		Connectivity Capability
+//			(R) (optional) (2 bytes)
+//
+//		Current Connectivity Mode
+//			(R, W) (optional) (1 byte)
+//
+//		Quality Of Service Qos Configuration Flexibility
+//			The ME ID of both the T-CONT and traffic scheduler contains a slot number. Even when attributes
+//			in the above list are RW, it is never permitted to change the slot number in a reference. That
+//			is, configuration flexibility never extends across slots. It is also not permitted to change the
+//			directionality of an upstream queue to downstream or vice versa.
+//
+//		Priority Queue Scale Factor
+//			NOTE 3 – Some legacy implementations may take the queue scale factor from the GEM block length
+//			attribute of the ANI-G ME. That option is discouraged in new implementations.
+//
+type Onu2G struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	onu2gBME = &ManagedEntityDefinition{
+		Name:    "Onu2G",
+		ClassID: 257,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  MultiByteField("EquipmentId", 20, nil, mapset.NewSetWith(Read), false, false, true, false, 1),
+			2:  ByteField("OpticalNetworkUnitManagementAndControlChannelOmccVersion", 0, mapset.NewSetWith(Read), true, false, false, false, 2),
+			3:  Uint16Field("VendorProductCode", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  ByteField("SecurityCapability", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  ByteField("SecurityMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint16Field("TotalPriorityQueueNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  ByteField("TotalTrafficSchedulerNumber", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  ByteField("Deprecated", 0, mapset.NewSetWith(Read), false, false, false, true, 8),
+			9:  Uint16Field("TotalGemPortIdNumber", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: Uint32Field("Sysuptime", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint16Field("ConnectivityCapability", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: ByteField("CurrentConnectivityMode", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("QualityOfServiceQosConfigurationFlexibility", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint16Field("PriorityQueueScaleFactor", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+		},
+	}
+}
+
+// NewOnu2G (class ID 257) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOnu2G(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(onu2gBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/onudata.go b/vendor/github.com/cboling/omci/generated/onudata.go
new file mode 100644
index 0000000..8f52e5c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/onudata.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OnuDataClassId ClassID = ClassID(2)
+
+var onudataBME *ManagedEntityDefinition
+
+// OnuData (class ID #2)
+//	This ME models the MIB itself. Clause I.1.3 explains the use of this ME with respect to MIB
+//	synchronization.
+//
+//	The ONU automatically creates an instance of this ME, and updates the associated attributes
+//	according to data within the ONU itself.
+//
+//	Relationships
+//		One instance of this ME is contained in an ONU.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Mib Data Sync
+//			MIB data sync: This attribute is used to check the alignment of the MIB of the ONU with the
+//			corresponding MIB in the OLT. MIB data sync relies on this attribute, which is a sequence number
+//			that can be checked by the OLT to see if the MIB snapshots for the OLT and ONU match. Refer to
+//			clause I.1.2.1 for a detailed description of this attribute. Upon ME instantiation, the ONU sets
+//			this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+type OnuData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	onudataBME = &ManagedEntityDefinition{
+		Name:    "OnuData",
+		ClassID: 2,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			GetAllAlarms,
+			GetAllAlarmsNext,
+			MibReset,
+			MibUpload,
+			MibUploadNext,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("MibDataSync", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewOnuData (class ID 2) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOnuData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(onudataBME, params...)
+}
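+
+// exampleOnuData is an editorial sketch, not part of the generated API. It
+// assumes the constructor returns a nil OmciErrors on success and that the
+// resulting ManagedEntity exposes GetAttribute as declared by IManagedEntity;
+// it creates a default OnuData instance and reads the MibDataSync attribute.
+func exampleOnuData() (interface{}, error) {
+	me, omciErr := NewOnuData()
+	if omciErr != nil {
+		return nil, omciErr.GetError()
+	}
+	return me.GetAttribute("MibDataSync")
+}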
diff --git a/vendor/github.com/cboling/omci/generated/onudynamicpowermanagementcontrol.go b/vendor/github.com/cboling/omci/generated/onudynamicpowermanagementcontrol.go
new file mode 100644
index 0000000..8952268
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/onudynamicpowermanagementcontrol.go
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OnuDynamicPowerManagementControlClassId ClassID = ClassID(336)
+
+var onudynamicpowermanagementcontrolBME *ManagedEntityDefinition
+
+// OnuDynamicPowerManagementControl (class ID #336)
+//	This ME models the ONU's ability to enter power conservation modes in cooperation with the OLT
+//	in an ITU-T G.987 system. [ITUT G.987.3] originally specified two alternative modes, doze and
+//	cyclic sleep. The subsequent revision of [ITUT G.987.3] simplified the specification providing a
+//	single power conservation mode, watchful sleep.
+//
+//	An ONU that supports power conservation modes automatically creates an instance of this ME.
+//
+//	Relationships
+//		One instance of this ME is associated with the ONU ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Power Reduction Management Capability
+//			5..255	Reserved
+//
+//		Power Reduction Management Mode
+//			Power reduction management mode: This attribute enables one or more of the ONU's managed power
+//			conservation modes. It is a bit map in which the bit value 0 disables the mode, while the value
+//			1 enables the mode. Bit assignments are the same as those of the power reduction management
+//			capability attribute. The default value of each bit is 0. (R, W) (mandatory) (1 byte)
+//
+//		Itransinit
+//			Itransinit:	This attribute is the ONU vendor's statement of the complete transceiver
+//			initialization time: the worst-case time required for the ONU to regain full functionality when
+//			leaving the asleep state in cyclic sleep mode or low-power state in watchful sleep mode (i.e.,
+//			turning on both the receiver and the transmitter and acquiring synchronization to the downstream
+//			flow), measured in units of 125 µs frames. The value zero indicates that the sleeping ONU can
+//			respond to a bandwidth grant without delay. (R) (mandatory) (2 bytes)
+//
+//		Itxinit
+//			Itxinit:	This attribute is the ONU vendor's statement of the transmitter initialization time:
+//			the time required for the ONU to regain full functionality when leaving the listen state (i.e.,
+//			turning on the transmitter), measured in units of 125 µs frames. The value zero indicates that
+//			the dozing ONU can respond to a bandwidth grant without delay. If watchful sleep is enabled, the
+//			ONU ignores this attribute. (R) (mandatory) (2 bytes)
+//
+//		Maximum Sleep Interval
+//			Maximum sleep interval: The Isleep/Ilowpower attribute specifies the maximum time the ONU spends
+//			in its asleep, listen, or low-power states, as a count of 125 µs frames. Local or remote events
+//			may truncate the ONU's sojourn in these states. The default value of this attribute is 0. (R, W)
+//			(mandatory) (4 bytes)
+//
+//		Maximum Receiver_Off Interval
+//			Maximum receiver-off interval: The Irxoff attribute specifies the maximum time the OLT can
+//			afford to wait from the moment it decides to wake up an ONU in the low-power state of the
+//			watchful sleep mode until the ONU is fully operational, specified as a count of 125 µs frames.
+//			(R, W) (mandatory) (4 bytes)
+//
+//		Minimum Aware Interval
+//			Minimum aware interval: The Iaware attribute specifies the time the ONU spends in its aware
+//			state, as a count of 125 µs frames, before it re-enters asleep or listen states. Local or remote
+//			events may independently cause the ONU to enter an active state rather than returning to a sleep
+//			state. The default value of this attribute is 0. (R, W) (mandatory) (4 bytes)
+//
+//		Minimum Active Held Interval
+//			Minimum active held interval: The Ihold attribute specifies the minimum time during which the
+//			ONU remains in the active held state, as a count of 125 µs frames. Its initial value is zero.
+//			(R, W) (mandatory) (2 bytes)
+//
+//		Maximum Sleep Interval Extension
+//			(R, W) (optional) (8 bytes)
+//
+//		Ethernet Passive Optical Network Epon Capability Extension
+//			–	Configurations:
+//				ackEnable configuration = enable,
+//				Sleep indication configuration = disable,
+//			Early wake-up configuration = enable
+//
+//		Epon Setup Extension
+//			(R, W) (optional) (1 byte)
+//
+//		Missing Consecutive Bursts Threshold
+//			Missing consecutive bursts threshold: The Clobi attribute specifies the maximum number of
+//			missing consecutive scheduled bursts from the ONU that the OLT is willing to tolerate without
+//			raising an alarm. The value of this attribute defaults to 4. (R, W) (mandatory) (4 bytes)
+//
+type OnuDynamicPowerManagementControl struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	onudynamicpowermanagementcontrolBME = &ManagedEntityDefinition{
+		Name:    "OnuDynamicPowerManagementControl",
+		ClassID: 336,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("PowerReductionManagementCapability", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  ByteField("PowerReductionManagementMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  Uint16Field("Itransinit", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint16Field("Itxinit", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("MaximumSleepInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint32Field("MaximumReceiverOffInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint32Field("MinimumAwareInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  Uint16Field("MinimumActiveHeldInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  Uint64Field("MaximumSleepIntervalExtension", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: ByteField("EthernetPassiveOpticalNetworkEponCapabilityExtension", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: ByteField("EponSetupExtension", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: Uint32Field("MissingConsecutiveBurstsThreshold", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 12),
+		},
+	}
+}
+
+// NewOnuDynamicPowerManagementControl (class ID 336) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOnuDynamicPowerManagementControl(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(onudynamicpowermanagementcontrolBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/onupowershedding.go b/vendor/github.com/cboling/omci/generated/onupowershedding.go
new file mode 100644
index 0000000..62be255
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/onupowershedding.go
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const OnuPowerSheddingClassId ClassID = ClassID(133)
+
+var onupowersheddingBME *ManagedEntityDefinition
+
+// OnuPowerShedding (class ID #133)
+//	This ME models the ONU's ability to shed services when the ONU goes into battery operation mode
+//	after AC power failure. Shedding classes are defined in the following table, which may span
+//	multiple circuit pack types. This feature works in conjunction with the power shed override
+//	attribute of the circuit pack ME, which can selectively prevent power shedding of priority
+//	ports.
+//
+//	An ONU that supports power shedding automatically creates an instance of this ME.
+//
+//	The following table defines the binding of shedding class and PPTP type. The coding is taken
+//	from Table 9.1.5-1. In the case of hybrid circuit pack types, multiple shedding classes may
+//	affect a circuit pack if the hardware is capable of partial power shedding.
+//
+//	An ONU may choose to model its ports with the port-mapping package of clause 9.1.8, rather than
+//	with real or virtual circuit packs. In this case, power shedding pertains to individual PPTPs
+//	(listed in column 2 of the table).
+//
+//	Relationships
+//		One instance of this ME is associated with the ONU ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Restore Power Timer Reset Interval
+//			Restore power timer reset interval: The time delay, in seconds, before resetting the power-
+//			shedding timers after full power restoration. Upon ME instantiation, the ONU sets this attribute
+//			to 0. (R, W) (mandatory) (2 bytes)
+//
+//		Data Class Shedding Interval
+//			Data class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Voice Class Shedding Interval
+//			Voice class shedding interval: This attribute only pertains to voice services that terminate on
+//			the ONU and are under the management control of the OMCI. (R, W) (mandatory) (2 bytes)
+//
+//		Video Overlay Class Shedding Interval
+//			Video overlay class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Video Return Class Shedding Interval
+//			Video return class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Digital Subscriber Line Class Shedding Interval
+//			Digital subscriber line (DSL) class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Atm Class Shedding Interval
+//			ATM class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Ces Class Shedding Interval
+//			CES class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Frame Class Shedding Interval
+//			Frame class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Sdh_Sonet Class Shedding Interval
+//			Sdh-sonet class shedding interval:	(R, W) (mandatory) (2 bytes)
+//
+//		Shedding Status
+//			The ONU sets each bit to 1 when power shedding is active, and clears it to 0 when the service is
+//			restored. (R) (optional) (2 bytes)
+//
+type OnuPowerShedding struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	onupowersheddingBME = &ManagedEntityDefinition{
+		Name:    "OnuPowerShedding",
+		ClassID: 133,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  Uint16Field("RestorePowerTimerResetInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  Uint16Field("DataClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  Uint16Field("VoiceClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  Uint16Field("VideoOverlayClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  Uint16Field("VideoReturnClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint16Field("DigitalSubscriberLineClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint16Field("AtmClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  Uint16Field("CesClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  Uint16Field("FrameClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: Uint16Field("SdhSonetClassSheddingInterval", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: Uint16Field("SheddingStatus", 0, mapset.NewSetWith(Read), true, false, true, false, 11),
+		},
+	}
+}
+
+// NewOnuPowerShedding (class ID 133) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewOnuPowerShedding(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(onupowersheddingBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointcesuni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointcesuni.go
new file mode 100644
index 0000000..3f2320e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointcesuni.go
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointCesUniClassId ClassID = ClassID(12)
+
+var physicalpathterminationpointcesuniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointCesUni (class ID #12)
+//	This ME represents the point at a CES UNI in the ONU where the physical path terminates and
+//	physical level functions are performed.
+//
+//	The ONU automatically creates an instance of this ME per port:
+//
+//	•	when the ONU has CES ports built into its factory configuration;
+//
+//	•	when a cardholder is provisioned to expect a circuit pack of a CES type;
+//
+//	•	when a cardholder provisioned for plug-and-play is equipped with a circuit pack of a CES type.
+//	Note that the installation of a plug-and-play card may indicate the presence of CES ports via
+//	equipment ID as well as its type and indeed may cause the ONU to instantiate a port-mapping
+//	package that specifies CES ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect a CES circuit pack, nor is it equipped with a CES circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each real or pre-provisioned CES port. It can be
+//		linked from a GEM IW TP, a pseudowire TP or a logical N × 64 kbit/s CTP.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Expected Type
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+//		Sensed Type
+//			Sensed type:	If the value of expected type is not 0, then the value of sensed type equals the
+//			value of expected type. If expected type = 0, then the value of sensed type is one of the
+//			compatible values from Table 9.1.5-1. Upon ME instantiation, the ONU sets this attribute to 0 or
+//			to the value that reflects the physically present equipment. (R) (mandatory if the ONU supports
+//			circuit packs with configurable interface types, e.g., C1.5/2/6.3) (1 byte)
+//
+//		Ces Loopback Configuration
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Framing
+//			Upon ME instantiation, the ONU sets this attribute to a value that reflects the vendor's
+//			default. (R, W) (optional) (1 byte)
+//
+//		Encoding
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory for DS1 and DS3
+//			interfaces) (1 byte)
+//
+//		Line Length
+//			Line length:	This attribute specifies the length of the twisted pair cable from a DS1 physical
+//			UNI to the DSX-1 cross-connect point or the length of coaxial cable from a DS3 physical UNI to
+//			the DSX-3 cross-connect point. Valid values are given in Table 9.8.1-1. Upon ME instantiation
+//			for a DS1 interface, the ONU assigns the value 0 for non-power feed type DS1 and the value 6 for
+//			power feed type DS1. Upon ME instantiation for a DS3 interface, the ONU sets this attribute to
+//			0x0F. (R, W) (optional) (1 byte)
+//
+//		Ds1 Mode
+//			In the event of conflicting values between this attribute and the (also optional) line length
+//			attribute, the line length attribute is taken to be valid. This permits the separation of line
+//			build-out (LBO) and power settings from smart jack and FDL behaviour. Upon ME instantiation, the
+//			ONU sets this attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Line Type
+//			(R, W) (mandatory for DS3, E3 and multi-configuration interfaces, not applicable to other
+//			interfaces) (1 byte)
+//
+type PhysicalPathTerminationPointCesUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointcesuniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointCesUni",
+		ClassID: 12,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("ExpectedType", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("SensedType", 0, mapset.NewSetWith(Read), true, false, false, false, 2),
+			3:  ByteField("CesLoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), true, false, false, false, 3),
+			4:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 5),
+			6:  ByteField("Framing", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("Encoding", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("LineLength", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  ByteField("Ds1Mode", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 10),
+			11: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("LineType", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 12),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointCesUni (class ID 12) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewPhysicalPathTerminationPointCesUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointcesuniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointethernetuni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointethernetuni.go
new file mode 100644
index 0000000..3f51e0f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointethernetuni.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointEthernetUniClassId ClassID = ClassID(11)
+
+var physicalpathterminationpointethernetuniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointEthernetUni (class ID #11)
+//	This ME represents the point at an Ethernet UNI where the physical path terminates and Ethernet
+//	physical level functions are performed.
+//
+//	The ONU automatically creates an instance of this ME per port:
+//
+//	•	when the ONU has Ethernet ports built into its factory configuration;
+//
+//	•	when a cardholder is provisioned to expect a circuit pack of the Ethernet type;
+//
+//	•	when a cardholder provisioned for plug-and-play is equipped with a circuit pack of the
+//	Ethernet type. Note that the installation of a plug-and-play card may indicate the presence of
+//	Ethernet ports via equipment ID as well as its type, and indeed may cause the ONU to instantiate
+//	a port-mapping package that specifies Ethernet ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect an Ethernet circuit pack, nor is it equipped with an Ethernet circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each instance of a pre-provisioned or real Ethernet
+//		port.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Expected Type
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+//		Sensed Type
+//			(R) (mandatory if the ONU supports circuit packs with configurable interface types, e.g., 10/100
+//			BASE-T card) (1 byte)
+//
+//		Auto Detection Configuration
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory for interfaces with
+//			autodetection options) (1 byte)
+//
+//		Ethernet Loopback Configuration
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Configuration Ind
+//			The value 0 indicates that the configuration status is unknown (e.g., Ethernet link is not
+//			established or the circuit pack is not yet installed). Upon ME instantiation, the ONU sets this
+//			attribute to 0. (R) (mandatory) (1 byte)
+//
+//		Max Frame Size
+//			Max frame size: This attribute denotes the maximum frame size allowed across this interface.
+//			Upon ME instantiation, the ONU sets the attribute to 1518. (R, W) (mandatory for G-PON, optional
+//			for ITU-T G.986 systems) (2 bytes)
+//
+//		Dte Or Dce Ind
+//			(R, W) (mandatory) (1 byte)
+//
+//		Pause Time
+//			Pause time:	This attribute allows the PPTP to ask the subscriber terminal to temporarily suspend
+//			sending data. Units are in pause quanta (1 pause quantum is 512 bit times of the particular
+//			implementation). Values: 0..0xFFFF. Upon ME instantiation, the ONU sets this attribute to 0.
+//			(R, W) (optional) (2 bytes)
+//
+//		Bridged Or Ip Ind
+//			Upon ME instantiation, the ONU sets this attribute to 2. (R, W) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Pppoe Filter
+//			PPPoE filter: This attribute controls filtering of PPPoE packets on this Ethernet port. The
+//			value 0 allows packets of all types. The value 1 discards everything but PPPoE packets. The
+//			default value is 0. (R, W) (optional) (1 byte)
+//
+//		Power Control
+//			NOTE – This attribute is the equivalent of the acPSEAdminControl variable defined in clause
+//			30.9.1.2.1 of [IEEE 802.3]. Other variables related to PoE appear in the PoE control ME.
+//
+type PhysicalPathTerminationPointEthernetUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointethernetuniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointEthernetUni",
+		ClassID: 11,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("ExpectedType", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("SensedType", 0, mapset.NewSetWith(Read), true, false, false, false, 2),
+			3:  ByteField("AutoDetectionConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  ByteField("EthernetLoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 6),
+			7:  ByteField("ConfigurationInd", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("MaxFrameSize", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("DteOrDceInd", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: Uint16Field("PauseTime", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("BridgedOrIpInd", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 12),
+			13: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: ByteField("PppoeFilter", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: ByteField("PowerControl", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointEthernetUni (class ID 11) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointEthernetUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointethernetuniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointlctuni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointlctuni.go
new file mode 100644
index 0000000..9287fdf
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointlctuni.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointLctUniClassId ClassID = ClassID(83)
+
+var physicalpathterminationpointlctuniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointLctUni (class ID #83)
+//	This ME models debug access to the ONU from any physical or logical port, for example, via a
+//	dedicated LCT UNI, via ordinary subscriber UNIs, or via the IP host config ME.
+//
+//	The ONU automatically creates an instance of this ME per port:
+//
+//	•	when the ONU has an LCT port built into its factory configuration;
+//
+//	•	when a cardholder is provisioned to expect a circuit pack of the LCT type;
+//
+//	•	when a cardholder provisioned for plug-and-play is equipped with a circuit pack of the LCT
+//	type;
+//
+//	NOTE – The installation of a plug-and-play card may indicate the presence of LCT ports via
+//	equipment ID as well as its type, and indeed may cause the ONU to instantiate a port-mapping
+//	package that specifies LCT ports.
+//
+//	•	when the ONU supports debug access through some other physical or logical means.
+//
+//	The ONU automatically deletes an instance of this ME when a cardholder is neither provisioned to
+//	expect an LCT circuit pack, nor is it equipped with an LCT circuit pack, or if the ONU is
+//	reconfigured in such a way that it no longer supports debug access.
+//
+//	LCT instances are not reported during an MIB upload.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a real or virtual circuit pack ME
+//		classified as an LCT type. An instance of this ME may also be associated with the ONU as a
+//		whole, if the ONU supports debug access through means other than a dedicated physical LCT port.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. If the LCT UNI is
+//			associated with the ONU as a whole, its ME ID should be 0. (R) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is described generically in clause A.1.6. The LCT has additional
+//			administrative state behaviour. When the administrative state is set to lock, debug access
+//			through all physical or logical means is blocked, except that the operation of a possible ONU
+//			remote debug ME is not affected. Administrative lock of ME instance 0 overrides administrative
+//			lock of any other PPTP LCT UNIs that may exist. (R, W) (mandatory) (1 byte)
+//
+type PhysicalPathTerminationPointLctUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointlctuniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointLctUni",
+		ClassID: 83,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointLctUni (class ID 83) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointLctUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointlctuniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointmocauni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointmocauni.go
new file mode 100644
index 0000000..29ab438
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointmocauni.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointMocaUniClassId ClassID = ClassID(162)
+
+var physicalpathterminationpointmocauniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointMocaUni (class ID #162)
+//	This ME represents an MoCA UNI, where physical paths terminate and physical path level functions
+//	are performed.
+//
+//	The ONU automatically creates an instance of this ME per port as follows.
+//
+//	•	When the ONU has MoCA ports built into its factory configuration.
+//
+//	•	When a cardholder is provisioned to expect a circuit pack of the MoCA type.
+//
+//	•	When a cardholder provisioned for plug-and-play is equipped with a circuit pack of the MoCA
+//	type. Note that the installation of a plug-and-play card may indicate the presence of MoCA ports
+//	via equipment ID as well as its type, and indeed may cause the ONU to instantiate a port-mapping
+//	package that specifies MoCA ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect an MoCA circuit pack, nor is it equipped with an MoCA circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each real or pre-provisioned MoCA port.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number is directly associated with the physical position of the UNI. The first byte is the slot
+//			ID (defined in clause 9.1.5). The second byte is the port ID, with the range 1..255. (R)
+//			(mandatory) (2 bytes)
+//
+//		Loopback Configuration
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Max Frame Size
+//			Max frame size: This attribute denotes the maximum frame size allowed across this interface.
+//			Upon ME instantiation, the ONU sets this attribute to 1518. (R, W) (mandatory) (2 bytes)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Pppoe Filter
+//			PPPoE filter: This attribute controls filtering of PPPoE packets on this MoCA port. When its
+//			value is 1, all packets other than PPPoE packets are discarded. The default 0 accepts packets of
+//			all types. (R, W) (optional) (1 byte)
+//
+//		Network Status
+//			(R) (mandatory) (1 byte)
+//
+//		Password
+//			Password:	This attribute specifies the MoCA encryption key. It is an ASCII string of 17 decimal
+//			digits. Upon ME instantiation, the ONU sets this attribute to 17 null bytes. (R, W) (mandatory)
+//			(17 bytes)
+//
+//		Privacy Enabled
+//			Privacy enabled: This attribute activates (1) link-layer security. The default value 0
+//			deactivates it. (R, W) (mandatory) (1 byte)
+//
+//		Minimum Bandwidth Alarm Threshold
+//			Minimum bandwidth alarm threshold: This attribute specifies the minimum desired PHY link
+//			bandwidth between two nodes. If the actual bandwidth is lower, an LL alarm is declared. Valid
+//			values are 0 to 0x0410 (260 Mbit/s) in 0.25 Mbit/s increments. The default value is 0x02D0
+//			(180 Mbit/s). The value 0 disables the threshold. (R, W) (optional) (2 bytes)
+//
+//		Frequency Mask
+//			Frequency mask: This attribute is a bit map of the centre frequencies that the interface is
+//			permitted to use, where each bit represents a centre frequency. The LSB (b[1]) corresponds to
+//			centre frequency 800 MHz. The next significant bit (b[2]) corresponds to centre frequency
+//			825 MHz. The 28th bit (b[28]) corresponds to centre frequency 1500 MHz. The four MSBs are not
+//			used. (R, W) (optional) (4 bytes)
+//
+//		Rf Channel
+//			RF channel:	This attribute reports the frequency to which the MoCA interface is currently tuned,
+//			in megahertz. (R) (mandatory) (2 bytes)
+//
+//		Last Operational Frequency
+//			Last operational frequency: This attribute reports the frequency to which the MoCA interface was
+//			tuned when last operational, in megahertz. (R) (mandatory) (2 bytes)
+//
+type PhysicalPathTerminationPointMocaUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointmocauniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointMocaUni",
+		ClassID: 162,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("LoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 1),
+			2:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 3),
+			4:  Uint16Field("MaxFrameSize", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 5),
+			6:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("PppoeFilter", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  ByteField("NetworkStatus", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  MultiByteField("Password", 17, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: ByteField("PrivacyEnabled", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: Uint16Field("MinimumBandwidthAlarmThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: Uint32Field("FrequencyMask", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("RfChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint16Field("LastOperationalFrequency", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointMocaUni (class ID 162) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointMocaUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointmocauniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointpotsuni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointpotsuni.go
new file mode 100644
index 0000000..8264200
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointpotsuni.go
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointPotsUniClassId ClassID = ClassID(53)
+
+var physicalpathterminationpointpotsuniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointPotsUni (class ID #53)
+//	This ME represents a POTS UNI in the ONU, where a physical path terminates and physical path
+//	level functions (analogue telephony) are performed.
+//
+//	The ONU automatically creates an instance of this ME per port as follows.
+//
+//	•	When the ONU has POTS ports built into its factory configuration.
+//
+//	•	When a cardholder is provisioned to expect a circuit pack of the POTS type.
+//
+//	•	When a cardholder provisioned for plug-and-play is equipped with a circuit pack of the POTS
+//	type. Note that the installation of a plug-and-play card may indicate the presence of POTS ports
+//	via equipment ID as well as type, and indeed may cause the ONU to instantiate a port-mapping
+//	package that specifies POTS ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect a POTS circuit pack, nor is it equipped with a POTS circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each real or pre-provisioned POTS port. Either a SIP
+//		or a VoIP voice CTP links to the POTS UNI. Status is available from a VoIP line status ME, and
+//		RTP and call control PM may be collected on this point.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			When the administrative state is set to lock, all user functions of this UNI are blocked, and
+//			alarms, TCAs and AVCs for this ME and all dependent MEs are no longer generated. Selection of a
+//			default value for this attribute is outside the scope of this Recommendation. (R, W) (mandatory)
+//			(1 byte)
+//
+//		Deprecated
+//			Deprecated: This attribute is not used and should not be supported. (R, W) (optional) (2 bytes)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Impedance
+//			where C1, R1, and R2 are related as shown in Figure 9.9.1-1. Upon ME instantiation, the ONU sets
+//			this attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Transmission Path
+//			Transmission path: This attribute allows setting the POTS UNI either to full-time on-hook
+//			transmission (0) or part-time on-hook transmission (1). Upon ME instantiation, the ONU sets this
+//			attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Rx Gain
+//			Rx gain:	This attribute specifies a gain value for the received signal in the form of a 2s
+//			complement number. Valid values are –120 (–12.0 dB) to 60 (+6.0 dB). The direction of the
+//			affected signal is in the D to A direction, towards the telephone set. Upon ME instantiation,
+//			the ONU sets this attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Tx Gain
+//			Tx gain:	This attribute specifies a gain value for the transmit signal in the form of a 2s
+//			complement number. Valid values are –120 (–12.0 dB) to 60 (+6.0 dB). The direction of the
+//			affected signal is in the A to D direction, away from the telephone set. Upon ME instantiation,
+//			the ONU sets this attribute to 0. (R, W) (optional) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Hook State
+//			Hook state:	This attribute indicates the current state of the subscriber line: 0 = on hook, 1 =
+//			off hook (R) (optional) (1 byte)
+//
+//		Pots Holdover Time
+//			POTS holdover time: This attribute determines the time during which the POTS loop voltage is
+//			held up when a LOS or softswitch connectivity is detected (please refer to the following table
+//			held up when a LOS or loss of softswitch connectivity is detected (please refer to the following
+//			table for a description of behaviours). After the specified time elapses, the ONU drops the loop
+//			When the ONU ranges successfully on the PON or softswitch connectivity is restored, it restores
+//			the POTS loop voltage immediately and resets the timer to zero. The attribute is expressed in
+//			seconds. The default value 0 selects the vendor's factory policy. (R, W) (optional) (2 bytes)
+//
+//		Nominal Feed Voltage
+//			Nominal feed voltage: This attribute indicates the designed nominal feed voltage of the POTS
+//			loop. It is an absolute value with resolution 1 V. This attribute does not represent the actual
+//			voltage measured on the loop, which is available through the test command. (R, W) (optional)
+//			(1 byte)
+//
+//		Loss Of Softswitch
+//			Loss of softswitch: This Boolean attribute controls the T/R holdover initiation criteria. False
+//			disables loss of softswitch connectivity detection as a criterion for initiating the POTS
+//			holdover timer. True enables loss of softswitch connectivity detection as a criterion for
+//			initiating the POTS holdover timer. This attribute is optional (if not implemented, the POTS
+//			holdover time is triggered on a LOS when POTS holdover is greater than zero). (R, W) (optional)
+//			(1 byte)
+//
+type PhysicalPathTerminationPointPotsUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointpotsuniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointPotsUni",
+		ClassID: 53,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+			Test,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), true, false, false, false, 1),
+			2:  Uint16Field("Deprecated", 0, mapset.NewSetWith(Read, Write), false, false, true, true, 2),
+			3:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 3),
+			4:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  ByteField("Impedance", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  ByteField("TransmissionPath", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("RxGain", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  ByteField("TxGain", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 9),
+			10: ByteField("HookState", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint16Field("PotsHoldoverTime", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("NominalFeedVoltage", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: ByteField("LossOfSoftswitch", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointPotsUni (class ID 53) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointPotsUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointpotsuniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideoani.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideoani.go
new file mode 100644
index 0000000..494121d
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideoani.go
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointVideoAniClassId ClassID = ClassID(90)
+
+var physicalpathterminationpointvideoaniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointVideoAni (class ID #90)
+//	This ME represents an RF video ANI in the ONU, where physical paths terminate and physical path
+//	level functions are performed.
+//
+//	The ONU automatically creates an instance of this ME per port as follows.
+//
+//	•	When the ONU has video ANI ports built into its factory configuration.
+//
+//	•	When a cardholder is provisioned to expect a circuit pack of the video ANI type.
+//
+//	•	When a cardholder provisioned for plug-and-play is equipped with a circuit pack of the video
+//	ANI type. Note that the installation of a plug-and-play card may indicate the presence of video
+//	ANI ports via equipment ID as well as its type, and indeed may cause the ONU to instantiate a
+//	port-mapping package that specifies video ANI ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect a video ANI circuit pack, nor is it equipped with a video ANI circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each instance of a real or pre-provisioned video ANI
+//		port.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the ANI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Frequency Range Low
+//			(R) (mandatory) (1 byte)
+//
+//		Frequency Range High
+//			(R) (mandatory) (1 byte)
+//
+//		Signal Capability
+//			(R) (mandatory) (1 byte)
+//
+//		Optical Signal Level
+//			(R) (optional) (1 byte)
+//
+//		Pilot Signal Level
+//			(R) (optional) (1 byte)
+//
+//		Signal Level Min
+//			Signal level min: This attribute indicates the minimum optical RF power per channel that results
+//			in a CNR of 47 dBc for a channel of 4.5 MHz bandwidth at a receive optical power of –5 dBm. The
+//			unit of this attribute is decibel-microwatt optical. (R) (mandatory) (1 byte)
+//
+//		Signal Level Max
+//			Signal level max: This attribute indicates the maximum optical RF power per channel that results
+//			in a CTB of –57 dBc for an 80-channel ensemble of carriers at a per-channel optical modulation
+//			index (OMI) of 3.5%. The unit of this attribute is decibel-microwatt optical. (R) (mandatory)
+//			(1 byte)
+//
+//		Pilot Frequency
+//			(R, W) (optional) (4 bytes)
+//
+//		Agc Mode
+//			(R, W) (optional) (1 byte)
+//
+//		Agc Setting
+//			(R, W) (optional) (1 byte)
+//
+//		Video Lower Optical Threshold
+//			NOTE – Because the power measurement returned in the optical signal level attribute has a
+//			resolution of 1 dB, it is possible that the measured value could appear to be in-range, even
+//			though an out-of-range alarm has been declared against a threshold with 0.1 dB resolution.
+//
+//		Video Upper Optical Threshold
+//			Video upper optical threshold: This attribute specifies the optical level used to declare the
+//			video OOR high alarm. Valid values are –12 to +6 dBm in 0.1 dB increments, represented as a 2s
+//			complement integer. (Coding –120 to +60, 0x00 = 0 dBm, 0x88 = –12.0 dBm, etc.) Upon ME
+//			instantiation, the ONU sets this attribute to 0x19 (+2.5 dBm). (R, W) (optional) (1 byte)
+//
+type PhysicalPathTerminationPointVideoAni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointvideoaniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointVideoAni",
+		ClassID: 90,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 2),
+			3:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 3),
+			4:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  ByteField("FrequencyRangeLow", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  ByteField("FrequencyRangeHigh", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  ByteField("SignalCapability", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  ByteField("OpticalSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  ByteField("PilotSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: ByteField("SignalLevelMin", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: ByteField("SignalLevelMax", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("PilotFrequency", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: ByteField("AgcMode", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: ByteField("AgcSetting", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: ByteField("VideoLowerOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+			16: ByteField("VideoUpperOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointVideoAni (class ID 90) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointVideoAni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointvideoaniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideouni.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideouni.go
new file mode 100644
index 0000000..83e5337
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointvideouni.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointVideoUniClassId ClassID = ClassID(82)
+
+var physicalpathterminationpointvideouniBME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointVideoUni (class ID #82)
+//	This ME represents an RF video UNI in the ONU, where physical paths terminate and physical path
+//	level functions are performed.
+//
+//	The ONU automatically creates an instance of this ME per port:
+//
+//	•	when the ONU has RF video UNI ports built into its factory configuration;
+//
+//	•	when a cardholder is provisioned to expect a circuit pack of the video UNI type;
+//
+//	•	when a cardholder provisioned for plug-and-play is equipped with a circuit pack of the video
+//	UNI type. Note that the installation of a plug-and-play card may indicate the presence of video
+//	ports via equipment ID as well as its type, and indeed may cause the ONU to instantiate a port-
+//	mapping package that specifies video ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect a video circuit pack, nor is it equipped with a video circuit pack.
+//
+//	Relationships
+//		One or more instances of this ME are associated with an instance of a real or virtual circuit
+//		pack classified as video type.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The first byte is the slot ID (defined in
+//			clause 9.1.5). The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Power Control
+//			Power control: This attribute controls whether power is provided from the ONU to an external
+//			equipment over the video PPTP. Value 1 enables power over coaxial cable. The default value 0
+//			disables power feed. (R, W) (optional) (1 byte)
+//
+type PhysicalPathTerminationPointVideoUni struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointvideouniBME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointVideoUni",
+		ClassID: 82,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 2),
+			3: ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 3),
+			4: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: ByteField("PowerControl", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointVideoUni (class ID 82) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointVideoUni(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointvideouniBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart1.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart1.go
new file mode 100644
index 0000000..d6420dc
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart1.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointXdslUniPart1ClassId ClassID = ClassID(98)
+
+var physicalpathterminationpointxdslunipart1BME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointXdslUniPart1 (class ID #98)
+//	This ME represents the point where physical paths terminate on an xDSL CO modem (xTU-C). The
+//	xDSL ME family is used for ADSL, VDSL2 and FAST services. A legacy family of VDSL MEs remains
+//	valid for ITU-T G.993.1 VDSL, if needed. It is documented in [ITU-T G.983.2].
+//
+//	The ONU automatically creates an instance of this ME per port:
+//
+//	•	when the ONU has xDSL ports built into its factory configuration;
+//
+//	•	when a cardholder is provisioned to expect a circuit pack of the xDSL type;
+//
+//	•	when a cardholder provisioned for plug-and-play is equipped with a circuit pack of the xDSL
+//	type. Note that the installation of a plug-and-play card may indicate the presence of xDSL ports
+//	via equipment ID as well as its type, and indeed may cause the ONU to instantiate a port-mapping
+//	package that specifies xDSL ports.
+//
+//	The ONU automatically deletes instances of this ME when a cardholder is neither provisioned to
+//	expect an xDSL circuit pack, nor is it equipped with an xDSL circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with each instance of a real or pre-provisioned xDSL port.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID:	This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical position of the UNI. The six LSBs of the first byte are the slot
+//			ID, defined in clause 9.1.5. The two MSBs indicate the channel number in some of the implicitly
+//			linked MEs, and must be 0 in the PPTP itself. This reduces the possible number of physical slots
+//			to 64. The second byte is the port ID, with the range 1..255. (R) (mandatory) (2 bytes)
+//
+//		Loopback Configuration
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Xdsl Line Configuration Profile
+//			xDSL line configuration profile: This attribute points to an instance of the xDSL line
+//			configuration profiles (part 1, 2 and 3) MEs, and if necessary, also to VDSL2 line configuration
+//			extensions (1 and 2) MEs, also to vectoring line configuration extension MEs. Upon ME
+//			instantiation, the ONU sets this attribute to 0, a null pointer. (R, W) (mandatory) (2 bytes)
+//
+//		Xdsl Subcarrier Masking Downstream Profile
+//			xDSL subcarrier masking downstream profile: This attribute points to an instance of the xDSL
+//			subcarrier masking downstream profile ME. Upon ME instantiation, the ONU sets this attribute to
+//			0, a null pointer. (R, W) (mandatory) (2 bytes)
+//
+//		Xdsl Subcarrier Masking Upstream Profile
+//			xDSL subcarrier masking upstream profile: This attribute points to an instance of the xDSL
+//			subcarrier masking upstream profile ME. Upon ME instantiation, the ONU sets this attribute to 0,
+//			a null pointer. (R, W) (mandatory) (2 bytes)
+//
+//		Xdsl Downstream Power Spectral Density Psd Mask Profile
+//			xDSL downstream power spectral density (PSD) mask profile: This attribute points to an instance
+//			of the xDSL PSD mask profile ME that defines downstream parameters. Upon ME instantiation, the
+//			ONU sets this attribute to 0, a null pointer. (R, W) (mandatory) (2 bytes)
+//
+//		Xdsl Downstream Rfi Bands Profile
+//			xDSL downstream RFI bands profile: This attribute points to an instance of the xDSL downstream
+//			RFI bands profile ME. Upon ME instantiation, the ONU sets this attribute to 0, a null pointer.
+//			(R, W) (mandatory) (2 bytes)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Modem Type
+//			NOTE – Many newer VDSL2 chip sets support only PTM. The ATM default is retained for backward
+//			compatibility, but implementers should be aware that the default may need to be overridden by
+//			provisioning before the xDSL UNI can be brought into service.
+//
+//		Upstream Psd Mask Profile
+//			Upstream PSD mask profile: This attribute points to an instance of the xDSL PSD mask profile
+//			that defines upstream parameters. Upon ME instantiation, the ONU sets this attribute to 0, a
+//			null pointer. (R, W) (optional) (2 bytes)
+//
+//		Network Specific Extensions Pointer
+//			Network specific extensions pointer: This attribute points to a network address ME that contains
+//			the path and name of a file containing network specific parameters for the associated UNI. Upon
+//			ME instantiation, the ONU sets this attribute to 0xFFFF, a null pointer. (R, W) (optional)
+//			(2 bytes)
+//
+type PhysicalPathTerminationPointXdslUniPart1 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointxdslunipart1BME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointXdslUniPart1",
+		ClassID: 98,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("LoopbackConfiguration", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 3),
+			4:  Uint16Field("XdslLineConfigurationProfile", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  Uint16Field("XdslSubcarrierMaskingDownstreamProfile", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint16Field("XdslSubcarrierMaskingUpstreamProfile", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint16Field("XdslDownstreamPowerSpectralDensityPsdMaskProfile", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  Uint16Field("XdslDownstreamRfiBandsProfile", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 9),
+			10: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("ModemType", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: Uint16Field("UpstreamPsdMaskProfile", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("NetworkSpecificExtensionsPointer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointXdslUniPart1 (class ID 98) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointXdslUniPart1(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointxdslunipart1BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart2.go b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart2.go
new file mode 100644
index 0000000..47eabb2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/physicalpathterminationpointxdslunipart2.go
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PhysicalPathTerminationPointXdslUniPart2ClassId ClassID = ClassID(99)
+
+var physicalpathterminationpointxdslunipart2BME *ManagedEntityDefinition
+
+// PhysicalPathTerminationPointXdslUniPart2 (class ID #99)
+//	This ME represents the point in the ONU where physical paths terminate on an xDSL CO modem
+//	(xTU-C). Standards and chip sets support several forms of DSL, including VDSL2, and the xDSL ME
+//	family is used for all of them, with specific extensions for technology variations.
+//
+//	The ONU creates or deletes an instance of this ME at the same time it creates or deletes the
+//	corresponding PPTP xDSL UNI part 1.
+//
+//	Relationships
+//		An instance of this ME is associated with each instance of a PPTP xDSL UNI part 1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 0 Downstream
+//			xDSL channel configuration profile for bearer channel 0 downstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 1 Downstream
+//			xDSL channel configuration profile for bearer channel 1 downstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 2 Downstream
+//			xDSL channel configuration profile for bearer channel 2 downstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 3 Downstream
+//			xDSL channel configuration profile for bearer channel 3 downstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 0 Upstream
+//			xDSL channel configuration profile for bearer channel 0 upstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 1 Upstream
+//			xDSL channel configuration profile for bearer channel 1 upstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 2 Upstream
+//			xDSL channel configuration profile for bearer channel 2 upstream:	(R, W) (optional) (2 bytes)
+//
+//		Xdsl Channel Configuration Profile For Bearer Channel 3 Upstream
+//			xDSL channel configuration profile for bearer channel 3 upstream:	(R, W) (optional) (2 bytes)
+//
+type PhysicalPathTerminationPointXdslUniPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	physicalpathterminationpointxdslunipart2BME = &ManagedEntityDefinition{
+		Name:    "PhysicalPathTerminationPointXdslUniPart2",
+		ClassID: 99,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("XdslChannelConfigurationProfileForBearerChannel0Downstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 1),
+			2: Uint16Field("XdslChannelConfigurationProfileForBearerChannel1Downstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 2),
+			3: Uint16Field("XdslChannelConfigurationProfileForBearerChannel2Downstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4: Uint16Field("XdslChannelConfigurationProfileForBearerChannel3Downstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: Uint16Field("XdslChannelConfigurationProfileForBearerChannel0Upstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6: Uint16Field("XdslChannelConfigurationProfileForBearerChannel1Upstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7: Uint16Field("XdslChannelConfigurationProfileForBearerChannel2Upstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8: Uint16Field("XdslChannelConfigurationProfileForBearerChannel3Upstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewPhysicalPathTerminationPointXdslUniPart2 (class ID 99) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewPhysicalPathTerminationPointXdslUniPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(physicalpathterminationpointxdslunipart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/priorityqueue.go b/vendor/github.com/cboling/omci/generated/priorityqueue.go
new file mode 100644
index 0000000..673f25a
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/priorityqueue.go
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PriorityQueueClassId ClassID = ClassID(277)
+
+var priorityqueueBME *ManagedEntityDefinition
+
+// PriorityQueue (class ID #277)
+//	NOTE 1 – In [ITU-T G.984.4], this is called a priority queue-G.
+//
+//	This ME specifies the priority queue used by a GEM port network CTP in the upstream direction.
+//	The upstream priority queue ME is also related to a T-CONT ME. By default, this relationship is
+//	fixed by the ONU hardware architecture, but some ONUs may also permit the relationship to be
+//	configured through the OMCI, as indicated by the QoS configuration flexibility attribute of the
+//	ONU2G ME.
+//
+//	In the downstream direction, priority queues are associated with UNIs. Again, the association is
+//	fixed by default, but some ONUs may permit the association to be configured through the OMCI.
+//
+//	If an ONU as a whole contains priority queues, it instantiates these queues autonomously.
+//	Priority queues may also be localized to pluggable circuit packs, in which case the ONU creates
+//	and deletes them in accordance with circuit pack pre-provisioning and the equipped
+//	configuration.
+//
+//	The OLT can find all the queues by reading the priority queue ME instances. If the OLT tries to
+//	retrieve a non-existent priority queue, the ONU denies the get action with an error indication.
+//
+//	See also Appendix II.
+//
+//	Priority queues can exist in the ONU core and circuit packs serving both UNI and ANI functions.
+//	Therefore, they can be indirectly created and destroyed through cardholder provisioning actions.
+//
+//	In the upstream direction, the weight attribute permits the configuring of an optional traffic
+//	scheduler. Several attributes support back pressure operation, whereby a back-pressure signal is
+//	sent backwards and causes the attached terminal to temporarily suspend sending data.
+//
+//	In the downstream direction, strict priority discipline among the queues serving a given UNI is
+//	the default, with priorities established through the related port attribute. If two or more non-
+//	empty queues have the same priority, capacity is allocated among them in proportion to their
+//	weights. Note that the details of the downstream model differ from those of the upstream model.
+//
+//	The yellow packet drop thresholds specify the drop probability for a packet that has been marked
+//	yellow (drop eligible) by a traffic descriptor or by external equipment such as a residential
+//	gateway (RG). If the current average queue occupancy is less than the minimum threshold, the
+//	yellow packet drop probability is zero. If the current average queue occupancy is greater than
+//	or equal to the maximum threshold, the yellow packet drop probability is one. The yellow drop
+//	probability increases linearly between 0 and max_p as the current average queue occupancy
+//	increases from the minimum to the maximum threshold.
+//
+//	The same model can be configured for green packets, those regarded as being within the traffic
+//	contract.
+//
+//	Drop precedence colour marking indicates the method by which a packet is marked as drop eligible
+//	(yellow). For discard eligibility indicator (DEI) and priority code point (PCP) marking, a drop
+//	eligible indicator is equivalent to yellow colour; otherwise, the colour is green. For
+//	differentiated services code point (DSCP) assured forwarding (AF) marking, the lowest drop
+//	precedence is equivalent to green; otherwise, the colour is yellow.
+//
+//	Relationships
+//		One or more instances of this ME are associated with the ONU-G ME to model upstream priority
+//		queues if the traffic management option attribute in the ONU-G ME is 0 or 2.
+//
+//		One or more instances of this ME are associated with a PPTP UNI ME as downstream priority
+//		queues. Downstream priority queues may or may not be provided for a virtual Ethernet interface
+//		point (VEIP).
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The MSB
+//			represents the direction (1: upstream, 0: downstream). The 15 LSBs represent a queue ID. The
+//			queue ID is numbered in ascending order by the ONU itself. It is strongly encouraged that the
+//			queue ID be formulated to simplify finding related queues. One way to do this is to number the
+//			queues such that the related port attributes are in ascending order (for the downstream and
+//			upstream queues separately). The range of downstream queue ids is 0 to 0x7FFF and the range of
+//			upstream queue ids is 0x8000 to 0xFFFF. (R) (mandatory) (2 bytes)
+//
+//		Queue Configuration Option
+//			Queue configuration option: This attribute identifies the buffer partitioning policy. The value
+//			1 means that several queues share one buffer of maximum queue size, while the value 0 means that
+//			each queue has an individual buffer of maximum queue size. (R) (mandatory) (1 byte)
+//
+//		Maximum Queue Size
+//			NOTE 2 – In this and the other similar attributes of the priority queue ME, some legacy
+//			implementations may take the queue scale factor from the GEM block length attribute of the ANI-G
+//			ME. This option is discouraged in new implementations.
+//
+//		Allocated Queue Size
+//			Allocated queue size: This attribute identifies the allocated size of this queue, in bytes,
+//			scaled by the priority queue scale factor attribute of the ONU2G. (R, W) (mandatory) (2 bytes)
+//
+//		Discard_Block Counter Reset Interval
+//			Discard-block counter reset interval: This attribute represents the interval in milliseconds at
+//			which the counter resets itself. (R, W) (optional) (2 bytes)
+//
+//		Threshold Value For Discarded Blocks Due To Buffer Overflow
+//			Threshold value for discarded blocks due to buffer overflow: This attribute specifies the
+//			threshold for the number of bytes (scaled by the priority queue scale factor attribute of the
+//			ONU2G) discarded on this queue due to buffer overflow. Its value controls the declaration of the
+//			block loss alarm. (R, W) (optional) (2 bytes)
+//
+//		Related Port
+//			If flexible configuration is not supported, the ONU should reject an attempt to set the related
+//			port with a parameter error result-reason code.
+//
+//		Traffic Scheduler Pointer
+//			The ONU should reject an attempt to violate these conditions with a parameter error result-
+//			reason code.
+//
+//		Weight
+//			Weight:	This attribute represents weight for WRR scheduling. At a given priority level, capacity
+//			is distributed to non-empty queues in proportion to their weights. In the upstream direction,
+//			this weight is meaningful if several priority queues are associated with a traffic scheduler or
+//			T-CONT whose policy is WRR. In the downstream direction, this weight is used by a UNI in a WRR
+//			fashion. Upon ME instantiation, the ONU sets this attribute to 1. (R, W) (mandatory) (1 byte)
+//
+//		Back Pressure Operation
+//			Back pressure operation: This attribute enables (0) or disables (1) back pressure operation. Its
+//			default value is 0. (R, W) (mandatory) (2 bytes)
+//
+//		Back Pressure Time
+//			Back pressure time: This attribute specifies the duration in microseconds of the backpressure
+//			signal. It can be used as a pause time for an Ethernet UNI. Upon ME instantiation, the ONU sets
+//			this attribute to 0. (R, W) (mandatory) (4 bytes)
+//
+//		Back Pressure Occur Queue Threshold
+//			Back pressure occur queue threshold: This attribute identifies the threshold queue occupancy, in
+//			bytes, scaled by the priority queue scale factor attribute of the ONU2G, to start sending a
+//			back-pressure signal. (R, W) (mandatory) (2 bytes)
+//
+//		Back Pressure Clear Queue Threshold
+//			Back pressure clear queue threshold: This attribute identifies the threshold queue occupancy, in
+//			bytes, scaled by the priority queue scale factor attribute of the ONU2G, to stop sending a back-
+//			pressure signal. (R, W) (mandatory) (2 bytes)
+//
+//		Packet Drop Queue Thresholds
+//			Packet drop queue thresholds: This attribute is a composite of four 2 byte values, a minimum and
+//			a maximum threshold, measured in bytes, scaled by the priority queue scale factor attribute of
+//			the ONU2-G, for green and yellow packets. The first value is the minimum green threshold, the
+//			queue occupancy below which all green packets are admitted to the queue. The second value is the
+//			maximum green threshold, the queue occupancy at or above which all green packets are discarded.
+//			The third value is the minimum yellow threshold, the queue occupancy below which all yellow
+//			packets are admitted to the queue. The fourth value is the maximum yellow threshold, the queue
+//			occupancy at or above which all yellow packets are discarded. The default is that all thresholds
+//			take the value of the maximum queue size. (R, W) (optional) (8 bytes)
+//
+//		Packet Drop Max_P
+//			Packet drop max_p: This attribute is a composite of two 1 byte values, the probability of
+//			dropping a coloured packet when the queue occupancy lies just below the maximum threshold for
+//			packets of that colour. The first value is the green packet max_p, and the second value is the
+//			yellow packet max_p. The probability, max_p, is determined by adding one to the unsigned value
+//			(0..255) of this attribute and dividing the result by 256. The default for each value is 255.
+//			(R, W) (optional) (2 bytes)
+//
+//		Queue Drop W_Q
+//			Queue drop w_q: This attribute determines the averaging coefficient, w_q, as described in
+//			[b-Floyd]. The averaging coefficient, w_q, is equal to 2^(-Queue drop w_q). For example, when
+//			queue drop w_q has the value 9, the averaging coefficient, w_q, is 2^(-9) = 1/512 = 0.0019. The
+//			default value is 9. (R, W) (optional) (1 byte)
+//
+//		Drop Precedence Colour Marking
+//			(R, W) (optional) (1 byte)
+//
+type PriorityQueue struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	priorityqueueBME = &ManagedEntityDefinition{
+		Name:    "PriorityQueue",
+		ClassID: 277,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("QueueConfigurationOption", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("MaximumQueueSize", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint16Field("AllocatedQueueSize", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  Uint16Field("DiscardBlockCounterResetInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  Uint16Field("ThresholdValueForDiscardedBlocksDueToBufferOverflow", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  Uint32Field("RelatedPort", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint16Field("TrafficSchedulerPointer", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("Weight", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  Uint16Field("BackPressureOperation", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: Uint32Field("BackPressureTime", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: Uint16Field("BackPressureOccurQueueThreshold", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: Uint16Field("BackPressureClearQueueThreshold", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 12),
+			13: Uint64Field("PacketDropQueueThresholds", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: Uint16Field("PacketDropMaxP", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: ByteField("QueueDropWQ", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+			16: ByteField("DropPrecedenceColourMarking", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewPriorityQueue (class ID 277) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPriorityQueue(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(priorityqueueBME, params...)
+}
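The PriorityQueue comments above describe two small mechanisms that a short sketch can make concrete: the ME ID convention (MSB carries the direction, the 15 LSBs carry the queue ID) and the colour-aware drop model (min/max thresholds per colour, max_p encoded as (attribute value + 1)/256, and the [b-Floyd] averaging coefficient w_q = 2^-Queue drop w_q). The following Go snippet is an illustrative sketch only, not part of the vendored package; the function names are invented for the example and threshold inputs are assumed to be already de-scaled to bytes.

package main

import "fmt"

// isUpstreamQueue decodes the ME ID convention described above: the MSB
// carries the direction (1: upstream, 0: downstream) and the 15 LSBs carry
// the queue ID assigned by the ONU.
func isUpstreamQueue(meID uint16) bool { return meID&0x8000 != 0 }

// averageOccupancy applies the [b-Floyd]-style EWMA with w_q = 2^-q,
// e.g. q = 9 gives w_q = 1/512.
func averageOccupancy(prevAvg, sample float64, q uint8) float64 {
	wq := 1.0 / float64(uint64(1)<<q)
	return (1-wq)*prevAvg + wq*sample
}

// dropProbability returns the drop probability for one colour, given the
// current average occupancy and the (min, max) thresholds in bytes (assumed
// already de-scaled), plus the raw 1-byte max_p attribute value; the
// probability at the maximum threshold is (value + 1)/256.
func dropProbability(avg, minThr, maxThr float64, maxPAttr uint8) float64 {
	maxP := (float64(maxPAttr) + 1) / 256.0 // attribute value 255 -> max_p = 1.0
	switch {
	case avg < minThr:
		return 0
	case avg >= maxThr:
		return 1
	default:
		return maxP * (avg - minThr) / (maxThr - minThr)
	}
}

func main() {
	fmt.Println(isUpstreamQueue(0x8001)) // true: upstream queue, queue ID 1
	avg := averageOccupancy(4000, 6000, 9)
	fmt.Printf("avg=%.1f p=%.3f\n", avg, dropProbability(avg, 2000, 8000, 255))
}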
diff --git a/vendor/github.com/cboling/omci/generated/pseudowiremaintenanceprofile.go b/vendor/github.com/cboling/omci/generated/pseudowiremaintenanceprofile.go
new file mode 100644
index 0000000..922553e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/pseudowiremaintenanceprofile.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PseudowireMaintenanceProfileClassId ClassID = ClassID(284)
+
+var pseudowiremaintenanceprofileBME *ManagedEntityDefinition
+
+// PseudowireMaintenanceProfile (class ID #284)
+//	The pseudowire maintenance profile permits the configuration of pseudowire service exception
+//	handling. It is created and deleted by the OLT.
+//
+//	The settings, and indeed existence, of a pseudowire maintenance profile affect the behaviour of
+//	the pseudowire PM history data ME only in establishing criteria for counting SESs, but in no
+//	other way. The pseudowire maintenance profile primarily affects the alarms declared by the
+//	subscribing pseudowire TP.
+//
+//	Relationships
+//		One or more instances of the pseudowire TP may point to an instance of the pseudowire
+//		maintenance profile. If the pseudowire TP does not refer to a pseudowire maintenance profile,
+//		the ONU's default exception handling is implied.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Jitter Buffer Maximum Depth
+//			Jitter buffer maximum depth: This attribute specifies the desired maximum depth of the playout
+//			buffer in the PSN to the TDM direction. The value is expressed as a multiple of the 125 μs frame
+//			rate. The default value 0 selects the ONU's internal policy. (R, W, setbycreate) (optional)
+//			(2 bytes)
+//
+//		Jitter Buffer Desired Depth
+//			Jitter buffer desired depth: This attribute specifies the desired nominal fill depth of the
+//			playout buffer in the PSN to the TDM direction. The value is expressed as a multiple of the
+//			125 μs frame rate. The default value 0 selects the ONU's internal policy. (R, W, setbycreate)
+//			(optional) (2 bytes)
+//
+//		Fill Policy
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Misconnected Packets Declaration Policy
+//			Misconnected packets declaration policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Misconnected Packets Clear Policy
+//			Misconnected packets clear policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Loss Of Packets Declaration Policy
+//			Loss of packets declaration policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Loss Of Packets Clear Policy
+//			Loss of packets clear policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Buffer Overrun_Underrun Declaration Policy
+//			Buffer overrun/underrun declaration policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Buffer Overrun_Underrun Clear Policy
+//			Buffer overrun/underrun clear policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Malformed Packets Declaration Policy
+//			Malformed packets declaration policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		Malformed Packets Clear Policy
+//			Malformed packets clear policy: (R, W, setbycreate) (optional) (1 byte)
+//
+//		R_Bit Transmit Set Policy
+//			R-bit transmit set policy: This attribute defines the number of consecutive lost packets that
+//			causes the transmitted R bit to be set in the TDM to the PSN direction, indicating lost packets
+//			to the far end. The default value 0 selects the ONU's internal policy. (R, W, setbycreate)
+//			(optional) (1 byte)
+//
+//		R_Bit Transmit Clear Policy
+//			R-bit transmit clear policy: This attribute defines the number of consecutive valid packets that
+//			causes the transmitted R bit to be cleared in the TDM to the PSN direction, removing the remote
+//			failure indication to the far end. The default value 0 selects the ONU's internal policy. (R, W,
+//			setbycreate) (optional) (1 byte)
+//
+//		R_Bit Receive Policy
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		L Bit Receive Policy
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Ses Threshold
+//			SES threshold: Number of lost, malformed or otherwise unusable packets expected in the PSN to
+//			the TDM direction within a 1 s interval that causes an SES to be counted. Stray packets do not
+//			count towards an SES, nor do packets whose L bit is set at the far end. The value 0 specifies
+//			that the ONU uses its internal default, which is not necessarily the same as the recommended
+//			default value 3. (R, W, set-by-create) (optional) (2 bytes)
+//
+type PseudowireMaintenanceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	pseudowiremaintenanceprofileBME = &ManagedEntityDefinition{
+		Name:    "PseudowireMaintenanceProfile",
+		ClassID: 284,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("JitterBufferMaximumDepth", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 1),
+			2:  Uint16Field("JitterBufferDesiredDepth", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+			3:  ByteField("FillPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4:  ByteField("MisconnectedPacketsDeclarationPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5:  ByteField("MisconnectedPacketsClearPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6:  ByteField("LossOfPacketsDeclarationPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 6),
+			7:  ByteField("LossOfPacketsClearPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8:  ByteField("BufferOverrunUnderrunDeclarationPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 8),
+			9:  ByteField("BufferOverrunUnderrunClearPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 9),
+			10: ByteField("MalformedPacketsDeclarationPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 10),
+			11: ByteField("MalformedPacketsClearPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 11),
+			12: ByteField("RBitTransmitSetPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 12),
+			13: ByteField("RBitTransmitClearPolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 13),
+			14: ByteField("RBitReceivePolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 14),
+			15: ByteField("LBitReceivePolicy", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 15),
+			16: Uint16Field("SesThreshold", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewPseudowireMaintenanceProfile (class ID 284) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPseudowireMaintenanceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(pseudowiremaintenanceprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/pseudowireperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/pseudowireperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..599eb93
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/pseudowireperformancemonitoringhistorydata.go
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PseudowirePerformanceMonitoringHistoryDataClassId ClassID = ClassID(285)
+
+var pseudowireperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// PseudowirePerformanceMonitoringHistoryData (class ID #285)
+//	This ME collects PM for a pseudowire TP. Most of the attributes monitor packets received from
+//	the PSN, and may therefore be considered egress PM. For the most part, ingress PM is collected
+//	at the CES PPTP ME.
+//
+//	NOTE – The pseudowire PM history data ME collects data similar, but not identical, to that
+//	available from the MAC bridge port PM history data ME associated with a MAC bridge. When the
+//	pseudowire is bridge-based, it may not be necessary to collect both.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the pseudowire TP.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the pseudowire TP. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Received Packets
+//			Received packets: This attribute counts the total number of packets, both payload and
+//			signalling, received in the PSN to the TDM direction. (R) (mandatory) (4 bytes)
+//
+//		Transmitted Packets
+//			Transmitted packets: This attribute counts the total number of packets, both payload and
+//			signalling, transmitted in the TDM to the PSN direction. The count includes packets whose L bit
+//			is set and which may therefore not contain a payload. (R) (mandatory) (4 bytes)
+//
+//		Missing Packets
+//			Missing packets: This attribute counts the number of lost packets, as indicated by gaps in the
+//			control word numbering sequence. Both payload and signalling packets, if any, contribute to this
+//			count. (R) (mandatory) (4 bytes)
+//
+//		Misordered Packets, Usable
+//			Misordered packets, usable: This attribute counts the number of packets received out of order,
+//			but which were able to be successfully re-ordered and played out. Both payload and signalling
+//			packets, if any, contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Misordered Packets Dropped
+//			Misordered packets dropped: This attribute counts the number of packets received out of sequence
+//			that were discarded, either because the ONU did not support reordering or because it was too
+//			late to reorder them. Both payload and signalling packets, if any, contribute to this count. (R)
+//			(mandatory) (4 bytes)
+//
+//		Playout Buffer Underruns_Overruns
+//			Playout buffer underruns/overruns: This attribute counts the number of packets that were
+//			discarded because they arrived too late or too early to be played out. Both payload and
+//			signalling packets, if any, contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Malformed Packets
+//			Malformed packets: This attribute counts the number of malformed packets, e.g., because the
+//			packet length was not as expected or because of an unexpected RTP payload type. Both payload and
+//			signalling packets, if any, contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Stray Packets
+//			Stray packets: This attribute counts the number of packets whose ECID or RTP SSRC failed to
+//			match the expected value, or which are otherwise known to have been misdelivered. Stray packets
+//			are discarded without affecting any of the other PM counters. Both payload and signalling
+//			packets, if any, contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Remote Packet Loss
+//			Remote packet loss: This attribute counts received packets whose R bit is set, indicating the
+//			loss of packets at the far end. Both payload and signalling packets, if any, contribute to this
+//			count. (R) (mandatory) (4 bytes)
+//
+//		Tdm L_Bit Packets Transmitted
+//			TDM L-bit packets transmitted: This attribute counts the number of packets transmitted with the
+//			L bit set, indicating a near-end TDM fault. Both payload and signalling packets, if any,
+//			contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Es
+//			ES:	This attribute counts errored seconds. Any discarded, lost, malformed or unusable packet
+//			received from the PSN during a given second causes this counter to increment. Both payload and
+//			signalling packets, if any, contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Ses
+//			SES:	This attribute counts severely errored seconds. The criterion for an SES may be configured
+//			through the pseudowire maintenance profile ME. Both payload and signalling packets, if any,
+//			contribute to this count. (R) (mandatory) (4 bytes)
+//
+//		Uas
+//			UAS:	This attribute counts unavailable seconds. An unavailable second begins at the onset of 10
+//			consecutive SES and ends at the onset of 10 consecutive seconds that are not severely errored. A
+//			service is unavailable if either its payload or its signalling, if any, are unavailable. During
+//			unavailable time, only UAS should be counted; other anomalies should not be counted. (R)
+//			(mandatory) (4 bytes)
+//
+type PseudowirePerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	pseudowireperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "PseudowirePerformanceMonitoringHistoryData",
+		ClassID: 285,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("ReceivedPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TransmittedPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("MissingPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("MisorderedPacketsUsable", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("MisorderedPacketsDropped", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("PlayoutBufferUnderrunsOverruns", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("MalformedPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("StrayPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("RemotePacketLoss", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TdmLBitPacketsTransmitted", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("Es", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("Ses", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("Uas", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+		},
+	}
+}
+
+// NewPseudowirePerformanceMonitoringHistoryData (class ID 285) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPseudowirePerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(pseudowireperformancemonitoringhistorydataBME, params...)
+}
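The ES/SES/UAS attributes above follow the usual availability rule: an unavailable period begins at the onset of 10 consecutive SES and ends at the onset of 10 consecutive non-severely-errored seconds, and only UAS is counted while unavailable. Below is a minimal sketch of that counting rule, for illustration only; it is not part of the vendored package, and the type and method names are invented for the example.

package main

import "fmt"

// uasCounter tracks unavailable seconds per the rule quoted above. Adjusting
// the ES/SES counters during unavailable time is omitted for brevity.
type uasCounter struct {
	unavailable bool
	sesRun      int    // consecutive SES seen while available
	goodRun     int    // consecutive non-SES seen while unavailable
	UAS         uint32 // unavailable seconds
}

// tick processes one second; ses reports whether it was severely errored.
func (c *uasCounter) tick(ses bool) {
	if !c.unavailable {
		if !ses {
			c.sesRun = 0
			return
		}
		c.sesRun++
		if c.sesRun == 10 {
			c.unavailable = true
			c.UAS += 10 // the triggering 10 SES are counted retroactively
			c.sesRun = 0
		}
		return
	}
	// Unavailable: every second counts as UAS until 10 good seconds occur.
	c.UAS++
	if ses {
		c.goodRun = 0
		return
	}
	c.goodRun++
	if c.goodRun == 10 {
		c.UAS -= 10 // the 10 good seconds belong to available time
		c.unavailable = false
		c.goodRun = 0
	}
}

func main() {
	var c uasCounter
	for i := 0; i < 12; i++ { // 12 severely errored seconds in a row
		c.tick(true)
	}
	for i := 0; i < 10; i++ { // recovery: 10 clean seconds
		c.tick(false)
	}
	fmt.Println("UAS:", c.UAS) // 12: all SES seconds, none of the recovery seconds
}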
diff --git a/vendor/github.com/cboling/omci/generated/pseudowireterminationpoint.go b/vendor/github.com/cboling/omci/generated/pseudowireterminationpoint.go
new file mode 100644
index 0000000..b008785
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/pseudowireterminationpoint.go
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PseudowireTerminationPointClassId ClassID = ClassID(282)
+
+var pseudowireterminationpointBME *ManagedEntityDefinition
+
+// PseudowireTerminationPoint (class ID #282)
+//	The pseudowire TP supports packetized (rather than TDM) transport of TDM services, transported
+//	either directly over Ethernet, over UDP/IP or over MPLS. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	Relationships
+//		One pseudowire TP ME exists for each distinct TDM service that is mapped to a pseudowire.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Underlying Transport
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Service Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Signalling
+//			(R, W, setbycreate) (mandatory for structured service type) (1 byte)
+//
+//		Tdm Uni Pointer
+//			TDM UNI pointer: If service type = structured, this attribute points to a logical N × 64 kbit/s
+//			subport CTP. Otherwise, this attribute points to a PPTP CES UNI. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		North_Side Pointer
+//			North-side pointer: When the pseudowire service is transported via IP, as indicated by the
+//			underlying transport attribute, the northside pointer attribute points to an instance of the
+//			TCP/UDP config data ME. When the pseudowire service is transported directly over Ethernet, the
+//			north-side pointer attribute is not used – the linkage to the Ethernet flow TP is implicit in
+//			the ME IDs. When the pseudowire service is transported over MPLS, the northside pointer
+//			attribute points to an instance of the MPLS PW TP. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Far_End Ip Info
+//			A null pointer is appropriate if the pseudowire is not transported via IP. (R, W, setbycreate)
+//			(mandatory for IP transport) (2 bytes)
+//
+//		Payload Size
+//			(R, W, setbycreate) (mandatory for unstructured service) (2 bytes)
+//
+//		Payload Encapsulation Delay
+//			(R, W, setbycreate) (mandatory for structured service) (1 byte)
+//
+//		Timing Mode
+//			(R, W) (mandatory) (1 byte)
+//
+//		Transmit Circuit Id
+//			(R, W) (mandatory for MEF 8 transport) (8 bytes)
+//
+//		Expected Circuit Id
+//			(R, W) (optional for MEF 8 transport) (8 bytes)
+//
+//		Received Circuit Id
+//			Received circuit ID: This attribute indicates the actual ECID(s) received on the payload and
+//			signalling channels, respectively. It may be used for diagnostic purposes. (R) (optional for MEF
+//			8 transport) (8 bytes)
+//
+//		Exception Policy
+//			Exception policy: This attribute points to an instance of the pseudowire maintenance profile ME.
+//			If the pointer has its default value 0, the ONU's internal defaults apply. (R, W) (optional)
+//			(2 bytes)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+type PseudowireTerminationPoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	pseudowireterminationpointBME = &ManagedEntityDefinition{
+		Name:    "PseudowireTerminationPoint",
+		ClassID: 282,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("UnderlyingTransport", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  ByteField("ServiceType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("Signalling", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint16Field("TdmUniPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("NorthSidePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint16Field("FarEndIpInfo", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  Uint16Field("PayloadSize", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("PayloadEncapsulationDelay", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("TimingMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: Uint64Field("TransmitCircuitId", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: Uint64Field("ExpectedCircuitId", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: Uint64Field("ReceivedCircuitId", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint16Field("ExceptionPolicy", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 14),
+			15: ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewPseudowireTerminationPoint (class ID 282) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPseudowireTerminationPoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(pseudowireterminationpointBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/pwatmperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/pwatmperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..f82cf04
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/pwatmperformancemonitoringhistorydata.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PwAtmPerformanceMonitoringHistoryDataClassId ClassID = ClassID(338)
+
+var pwatmperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// PwAtmPerformanceMonitoringHistoryData (class ID #338)
+//	This ME collects PM data associated with an ATM pseudowire. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PW ATM configuration data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to the instance of the PW ATM configuration data ME.
+//			(R, setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Downstream Missing Packets Counter
+//			Downstream missing packets counter: This attribute counts missing packets, as detected via
+//			control word sequence number gaps. (R) (mandatory) (4 bytes)
+//
+//		Downstream Reordered Packets Counter
+//			Downstream reordered packets counter: This attribute counts packets detected out of sequence via
+//			the control word sequence number, but successfully reordered. Some implementations may not
+//			support this feature. (R) (optional) (4 bytes)
+//
+//		Downstream Misordered Packets Counter
+//			Downstream misordered packets counter: This attribute counts packets detected out of order via
+//			the control word sequence numbers. (R) (mandatory) (4 bytes)
+//
+//		Upstream Timeout Packets Counter
+//			Upstream timeout packets counter: This attribute counts packets transmitted due to timeout
+//			expiration while attempting to collect cells. (R) (mandatory) (4 bytes)
+//
+//		Upstream Transmitted Cells Counter
+//			Upstream transmitted cells counter: This attribute counts transmitted cells. (R) (mandatory)
+//			(4 bytes)
+//
+//		Upstream Dropped Cells Counter
+//			Upstream dropped cells counter: This attribute counts dropped cells. (R) (mandatory) (4 bytes)
+//
+//		Upstream Received Cells Counter
+//			Upstream received cells counter: This attribute counts received cells. (R) (mandatory) (4 bytes)
+//
+type PwAtmPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	pwatmperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "PwAtmPerformanceMonitoringHistoryData",
+		ClassID: 338,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("DownstreamMissingPacketsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("DownstreamReorderedPacketsCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: Uint32Field("DownstreamMisorderedPacketsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("UpstreamTimeoutPacketsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("UpstreamTransmittedCellsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint32Field("UpstreamDroppedCellsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9: Uint32Field("UpstreamReceivedCellsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+		},
+	}
+}
+
+// NewPwAtmPerformanceMonitoringHistoryData (class ID 338) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPwAtmPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(pwatmperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/pwethernetconfigurationdata.go b/vendor/github.com/cboling/omci/generated/pwethernetconfigurationdata.go
new file mode 100644
index 0000000..6bcb70b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/pwethernetconfigurationdata.go
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const PwEthernetConfigurationDataClassId ClassID = ClassID(339)
+
+var pwethernetconfigurationdataBME *ManagedEntityDefinition
+
+// PwEthernetConfigurationData (class ID #339)
+//	This ME contains the Ethernet pseudowire configuration data. Instances of this ME are created
+//	and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the MPLS pseudowire TP ME with a
+//		pseudowire type attribute equal to the following.
+//
+//		5	Ethernet
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Mpls Pseudowire Tp Pointer
+//			MPLS pseudowire TP pointer: This attribute points to an instance of the MPLS pseudowire TP ME
+//			associated with this ME. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tp Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Uni Pointer
+//			UNI pointer: This attribute points to the associated instance of a UNI-side ME. The type of UNI
+//			is determined by the TP type attribute. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+type PwEthernetConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	pwethernetconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "PwEthernetConfigurationData",
+		ClassID: 339,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("MplsPseudowireTpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("TpType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("UniPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewPwEthernetConfigurationData (class ID 339) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewPwEthernetConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(pwethernetconfigurationdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/radiusperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/radiusperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..93be0f3
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/radiusperformancemonitoringhistorydata.go
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const RadiusPerformanceMonitoringHistoryDataClassId ClassID = ClassID(293)
+
+var radiusperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// RadiusPerformanceMonitoringHistoryData (class ID #293)
+//	This ME collects performance statistics on an ONU's radius client, particularly as related to
+//	its IEEE 802.1X operation.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an ONU.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID (namely 0), this ME is implicitly linked to an instance of a dot1X configuration
+//			profile. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Access_Request Packets Transmitted
+//			Access-request packets transmitted: This attribute counts transmitted radius access-request
+//			messages, including retransmissions. (R) (mandatory) (4 bytes)
+//
+//		Access_Request Retransmission Count
+//			Access-request retransmission count: This attribute counts radius access-request
+//			retransmissions. (R) (mandatory) (4 bytes)
+//
+//		Access_Challenge Packets Received
+//			Access-challenge packets received: This attribute counts received radius access-challenge
+//			messages. (R) (mandatory) (4 bytes)
+//
+//		Access_Accept Packets Received
+//			Access-accept packets received: This attribute counts received radius access-accept messages.
+//			(R) (mandatory) (4 bytes)
+//
+//		Access_Reject Packets Received
+//			Access-reject packets received: This attribute counts received radius access-reject messages.
+//			(R) (mandatory) (4 bytes)
+//
+//		Invalid Radius Packets Received
+//			Invalid radius packets received: This attribute counts received invalid radius messages. (R)
+//			(mandatory) (4 bytes)
+//
+type RadiusPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	radiusperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "RadiusPerformanceMonitoringHistoryData",
+		ClassID: 293,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("AccessRequestPacketsTransmitted", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("AccessRequestRetransmissionCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("AccessChallengePacketsReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("AccessAcceptPacketsReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("AccessRejectPacketsReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint32Field("InvalidRadiusPacketsReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewRadiusPerformanceMonitoringHistoryData (class ID 293) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewRadiusPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(radiusperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/reani-g.go b/vendor/github.com/cboling/omci/generated/reani-g.go
new file mode 100644
index 0000000..7ee4996
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/reani-g.go
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ReAniGClassId ClassID = ClassID(313)
+
+var reanigBME *ManagedEntityDefinition
+
+// ReAniG (class ID #313)
+//	This ME organizes data associated with each R'/S' physical interface of an RE if the RE supports
+//	OEO regeneration in either direction. The management ONU automatically creates one instance of
+//	this ME for each R'/S' physical port (uni- or bidirectional) as follows.
+//
+//	•	When the RE has mid-span PON RE ANI interface ports built into its 	factory configuration.
+//
+//	•	When a cardholder is provisioned to expect a circuit pack of the mid-span PON RE ANI type.
+//
+//	•	When a cardholder provisioned for plug-and-play is equipped with a circuit pack of the midspan
+//	PON RE ANI type. Note that the installation of a plug-and-play card may indicate the presence of
+//	a mid-span PON RE ANI port via equipment ID as well as its type attribute, and indeed may cause
+//	the management ONU to instantiate a port-mapping package to specify the ports precisely.
+//
+//	The management ONU automatically deletes instances of this ME when a cardholder is neither
+//	provisioned to expect a mid-span PON RE ANI circuit pack, nor is it equipped with a mid-span PON
+//	RE ANI circuit pack.
+//
+//	As illustrated in Figure 8.2.10-4, an RE ANI-G may share the physical port with an RE downstream
+//	amplifier. The ONU declares a shared configuration through the port-mapping package combined
+//	port table, whose structure defines one ME as the master. It is recommended that the RE ANI-G be
+//	the master, with the RE downstream amplifier as a secondary ME.
+//
+//	The administrative state, operational state and ARC attributes of the master ME override similar
+//	attributes in secondary MEs associated with the same port. In the secondary ME, these attributes
+//	are present, but cause no action when written and have undefined values when read. The RE
+//	downstream amplifier should use its provisionable downstream alarm thresholds and should declare
+//	downstream alarms as necessary; other isomorphic alarms should be declared by the RE ANI-G. The
+//	test action should be addressed to the master ME.
+//
+//	Relationships
+//		An instance of this ME is associated with each R'/S' physical interface of an RE that includes
+//		OEO regeneration in either direction, and with one or more instances of the PPTP RE UNI. It may
+//		also be associated with an RE downstream amplifier.
+//
+//	Attributes
+//		Managed Entity Id
+//			NOTE 1 – This ME ID may be identical to that of an RE downstream amplifier if it shares the same
+//			physical slot and port.
+//
+//		Administrative State
+//			NOTE 2 – When an RE supports multiple PONs, or protected access to a single PON, its primary
+//			ANI-G cannot be completely shut down, due to a loss of the management communications capability.
+//			Complete blocking of service and removal of power may nevertheless be appropriate for secondary
+//			RE ANI-Gs. Administrative lock suppresses alarms and notifications for an RE ANI-G, be it either
+//			primary or secondary.
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Optical Signal Level
+//			Optical signal level: This attribute reports the current measurement of total downstream optical
+//			power. Its value is a 2s complement integer referred to 1 mW (i.e., dBm), with 0.002 dB
+//			granularity. (R) (optional) (2 bytes)
+//
+//		Lower Optical Threshold
+//			Lower optical threshold: This attribute specifies the optical level that the RE uses to declare
+//			the downstream low received optical power alarm. Valid values are –127 dBm (coded as 254) to
+//			0 dBm (coded as 0) in 0.5 dB increments. The default value 0xFF selects the RE's internal
+//			policy. (R, W) (optional) (1 byte)
+//
+//		Upper Optical Threshold
+//			Upper optical threshold: This attribute specifies the optical level that the RE uses to declare
+//			the downstream high received optical power alarm. Valid values are –127 dBm (coded as 254) to
+//			0 dBm (coded as 0) in 0.5 dB increments. The default value 0xFF selects the RE's internal
+//			policy. (R, W) (optional) (1 byte)
+//
+//		Transmit Optical Level
+//			Transmit optical level: This attribute reports the current measurement of mean optical launch
+//			power. Its value is a 2s complement integer referred to 1 mW (i.e., dBm), with 0.002 dB
+//			granularity. (R) (optional) (2 bytes)
+//
+//		Lower Transmit Power Threshold
+//			Lower transmit power threshold: This attribute specifies the minimum mean optical launch power
+//			that the RE uses to declare the low transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value 0x7F selects
+//			the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		Upper Transmit Power Threshold
+//			Upper transmit power threshold: This attribute specifies the maximum mean optical launch power
+//			that the RE uses to declare the high transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value 0x7F selects
+//			the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		Usage Mode
+//			3	This R'/S' interface is used as the uplink for both the embedded management ONU and one or
+//			more PPTP RE UNI(s) (in a time division fashion).
+//
+//		Target Upstream Frequency
+//			Target upstream frequency: This attribute specifies the frequency of the converted upstream
+//			signal on the optical trunk line (OTL), in gigahertz. The converted frequency must conform to
+//			the frequency plan specified in [ITUT G.984.6]. The value 0 means that the upstream signal
+//			frequency remains the same as the original frequency; no frequency conversion is done. If the RE
+//			does not support provisionable upstream frequency (wavelength), this attribute should take the
+//			fixed value representing the RE's capability and the RE should deny attempts to set the value of
+//			the attribute. If the RE does support provisionable upstream frequency conversion, the default
+//			value of this attribute is 0. (R, W) (optional) (4 bytes).
+//
+//		Target Downstream Frequency
+//			Target downstream frequency: This attribute specifies the frequency of the downstream signal
+//			received by the RE on the OTL, in gigahertz. The incoming frequency must conform to the
+//			frequency plan specified in [ITUT G.984.6]. The default value 0 means that the downstream
+//			frequency remains the same as its original frequency; no frequency conversion is done. If the RE
+//			does not support provisionable downstream frequency selectivity, this attribute should take the
+//			fixed value representing the RE's capability, and the RE should deny attempts to set the value
+//			of the attribute. If the RE does support provisionable downstream frequency selectivity, the
+//			default value of this attribute is 0. (R, W) (optional) (4 bytes).
+//
+//		Upstream Signal Transmission Mode
+//			Upstream signal transmission mode: When true, this Boolean attribute enables conversion from
+//			burst mode to continuous mode. The default value false specifies burst mode upstream
+//			transmission. If the RE does not have the ability to convert from burst to continuous mode
+//			transmission, it should deny attempts to set this attribute to true. (R, W) (optional) (1 byte)
+//
+type ReAniG struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	reanigBME = &ManagedEntityDefinition{
+		Name:    "ReAniG",
+		ClassID: 313,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 2),
+			3:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 3),
+			4:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  Uint16Field("OpticalSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  ByteField("LowerOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("UpperOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  Uint16Field("TransmitOpticalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  ByteField("LowerTransmitPowerThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: ByteField("UpperTransmitPowerThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("UsageMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: Uint32Field("TargetUpstreamFrequency", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint32Field("TargetDownstreamFrequency", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: ByteField("UpstreamSignalTransmissionMode", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+		},
+	}
+}
+
+// NewReAniG (class ID 313) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewReAniG(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(reanigBME, params...)
+}
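The generated constructors in this commit all follow the same shape: init() fills in a package-level *ManagedEntityDefinition, and the New<Name>() function wraps NewManagedEntity() with that definition. Below is a minimal usage sketch, assuming ParamData carries EntityID and Attributes fields and that AttributeValueMap is a string-keyed map; neither assumption is shown in this diff, only the type names themselves appear above.

package main

import (
	"fmt"

	"github.com/cboling/omci/generated"
)

func main() {
	// EntityID and Attributes are assumed field names on ParamData;
	// the attribute name matches the AttributeDefinitionMap above.
	me, omciErr := generated.NewReAniG(generated.ParamData{
		EntityID: 0x0101,
		Attributes: generated.AttributeValueMap{
			"AdministrativeState": byte(0),
		},
	})
	fmt.Println(me, omciErr)
}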
diff --git a/vendor/github.com/cboling/omci/generated/recommonamplifierparameters.go b/vendor/github.com/cboling/omci/generated/recommonamplifierparameters.go
new file mode 100644
index 0000000..cf74734
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/recommonamplifierparameters.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ReCommonAmplifierParametersClassId ClassID = ClassID(328)
+
+var recommonamplifierparametersBME *ManagedEntityDefinition
+
+// ReCommonAmplifierParameters (class ID #328)
+//	This ME organizes data associated with each OA supported by the RE. The management ONU
+//	automatically creates one instance of this ME for each upstream or downstream OA.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the RE downstream amplifier or RE
+//		upstream amplifier ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			NOTE – The type of the linked ME can be determined by uniqueness of slot and port.
+//
+//		Gain
+//			Gain:	This attribute reports the current measurement of the OA's gain, in decibels. Its value is
+//			a 2s complement integer with 0.25 dB granularity, and with a range from –32 dB to 31.5 dB. The
+//			value 0x7F indicates that the current measured gain is 0, i.e., negative infinity in decibels
+//			value 0x7F indicates that the current measured gain is 0, i.e., negative infinity in decibel
+//
+//		Lower Gain Threshold
+//			Lower gain threshold: This attribute specifies the gain the RE uses to declare the low gain
+//			alarm. Valid values are 0 dB (coded as 0x00) to 63.5 dB (coded as 0xFE). The default value 0xFF
+//			selects the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		Upper Gain Threshold
+//			Upper gain threshold: This attribute specifies the gain the RE uses to declare the high gain
+//			alarm. Valid values are 0 dB (coded as 0x00) to 63.5 dB (coded as 0xFE). The default value 0xFF
+//			selects the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		Target Gain
+//			Target gain:	This attribute specifies the target gain, when the operational mode of the parent
+//			RE downstream or upstream amplifier is set to constant gain mode. Valid values are 0 dB (coded
+//			as 0x00) to 63.5 dB (coded as 0xFE). The default value 0xFF selects the RE's internal policy.
+//			(R, W) (optional) (1 byte)
+//
+//		Device Temperature
+//			Device temperature: This attribute reports the temperature in degrees Celsius of the active
+//			device (SOA or pump) in the OA. Its value is a 2s complement integer with granularity 1/256 °C.
+//			(R) (optional) (2 bytes)
+//
+//		Lower Device Temperature Threshold
+//			Lower device temperature threshold: This attribute is a 2s complement integer that specifies the
+//			temperature the RE uses to declare the low temperature alarm. Valid values are –64 to +63 °C in
+//			0.5 °C increments. The default value 0x7F selects the RE's internal policy. (R, W) (optional)
+//			(1 byte)
+//
+//		Upper Device Temperature Threshold
+//			Upper device temperature threshold: This attribute is a 2s complement integer that specifies the
+//			temperature the RE uses to declare the high temperature alarm. Valid values are –64 to +63 °C in
+//			0.5 °C increments. The default value 0x7F selects the RE's internal policy. (R, W) (optional)
+//			(1 byte)
+//
+//		Device Bias Current
+//			Device bias current: This attribute contains the measured bias current applied to the SOA or
+//			pump laser. Its value is an unsigned integer with granularity 2 mA. Valid values are 0 to
+//			512 mA. (R) (optional) (1 byte)
+//
+//		Amplifier Saturation Output Power
+//			Amplifier saturation output power: This attribute reports the saturation output power of the
+//			amplifier as specified by the manufacturer. Its value is an unsigned integer referred to 1 mW
+//			(i.e., dBm), with 0.1 dB granularity. (R) (optional) (2 bytes)
+//
+//		Amplifier Noise Figure
+//			Amplifier noise figure: This attribute reports the intrinsic noise figure of the amplifier, as
+//			specified by the manufacturer. Its value is an unsigned integer with 0.1 dB granularity. (R)
+//			(optional) (1 byte)
+//
+//		Amplifier Saturation Gain
+//			Amplifier saturation gain: This attribute reports the gain of the amplifier at saturation, as
+//			specified by the manufacturer. Its value is an unsigned integer with 0.25 dB granularity, and
+//			with a range from 0 to 63.75 dB. (R) (optional) (1 byte)
+//
+type ReCommonAmplifierParameters struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	recommonamplifierparametersBME = &ManagedEntityDefinition{
+		Name:    "ReCommonAmplifierParameters",
+		ClassID: 328,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("Gain", 0, mapset.NewSetWith(Read), false, false, true, false, 1),
+			2:  ByteField("LowerGainThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 2),
+			3:  ByteField("UpperGainThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4:  ByteField("TargetGain", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  Uint16Field("DeviceTemperature", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  ByteField("LowerDeviceTemperatureThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("UpperDeviceTemperatureThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  ByteField("DeviceBiasCurrent", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint16Field("AmplifierSaturationOutputPower", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: ByteField("AmplifierNoiseFigure", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: ByteField("AmplifierSaturationGain", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+		},
+	}
+}
+
+// NewReCommonAmplifierParameters (class ID 328) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewReCommonAmplifierParameters(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(recommonamplifierparametersBME, params...)
+}
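The AllowedAttributeMask values in these definitions track the attribute numbering: comparing 0XFFFC (ReAniG, 14 attributes), 0XFFE0 (ReCommonAmplifierParameters, 11 attributes) and the masks further below (0XFF00 for 8, 0XFC00 for 6, 0XFFFE for 15 attributes), attribute n appears to occupy bit 16-n, with attribute 1 in the most significant bit. A small sketch of that apparent encoding follows; the helper function is illustrative and not part of the library.

package main

import "fmt"

// allowedMask reproduces the apparent AllowedAttributeMask encoding used by
// these generated definitions: attribute n (1..16) occupies bit 16-n, so
// attribute 1 maps to 0x8000 and attribute 14 to 0x0004.
func allowedMask(attrCount int) uint16 {
	var mask uint16
	for n := 1; n <= attrCount; n++ {
		mask |= 1 << uint(16-n)
	}
	return mask
}

func main() {
	fmt.Printf("ReAniG (14 attributes): 0x%04X\n", allowedMask(14))                      // 0xFFFC
	fmt.Printf("ReCommonAmplifierParameters (11 attributes): 0x%04X\n", allowedMask(11)) // 0xFFE0
}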
diff --git a/vendor/github.com/cboling/omci/generated/redownstreamamplifier.go b/vendor/github.com/cboling/omci/generated/redownstreamamplifier.go
new file mode 100644
index 0000000..d68ef66
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/redownstreamamplifier.go
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ReDownstreamAmplifierClassId ClassID = ClassID(316)
+
+var redownstreamamplifierBME *ManagedEntityDefinition
+
+// ReDownstreamAmplifier (class ID #316)
+//	This ME organizes data associated with each OA for downstream data supported by the RE. The
+//	management ONU automatically creates one instance of this ME for each downstream OA as follows.
+//
+//	•	When the RE has mid-span PON RE downstream OA ports built into its factory configuration.
+//
+//	•	When a cardholder is provisioned to expect a circuit pack of the mid-span PON RE downstream OA
+//	type.
+//
+//	•	When a cardholder provisioned for plug-and-play is equipped with a circuit pack of the midspan
+//	PON RE downstream OA type. Note that the installation of a plug-and-play card may indicate the
+//	presence of a mid-span PON RE downstream OA via equipment ID as well as its type attribute, and
+//	indeed may cause the management ONU to instantiate a port-mapping package to specify the ports
+//	precisely.
+//
+//	The management ONU automatically deletes instances of this ME when a cardholder is neither
+//	provisioned to expect a mid-span PON RE downstream OA circuit pack, nor is it equipped with a
+//	mid-span PON RE downstream OA circuit pack.
+//
+//	Relationships
+//		An instance of this ME is associated with a downstream OA and with an instance of a circuit
+//		pack. If the RE includes OEO regeneration in either direction, the RE downstream amplifier is
+//		also associated with an RE ANI-G. Refer to clause 9.14.1 for further discussion.
+//
+//	Attributes
+//		Managed Entity Id
+//			NOTE 1 – This ME ID may be identical to that of an RE ANI-G if it shares the same physical slot-
+//			port.
+//
+//		Administrative State
+//			NOTE 2 – When an RE supports multiple PONs, or protected access to a single PON, its primary
+//			ANI-G cannot be completely shut down, due to a loss of the management communications capability.
+//			Complete blocking of service and removal of power may nevertheless be appropriate for secondary
+//			RE ANI-Gs. Administrative lock suppresses alarms and notifications for both primary and
+//			secondary RE ANI-Gs. Administrative lock suppresses alarms and notifications for an RE
+//			downstream amplifier, be it either primary or secondary.
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Arc
+//			ARC:	See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Arc Interval
+//			ARC interval: See clause A.1.4.3. (R, W) (optional) (1 byte)
+//
+//		Operational Mode
+//			(R,W) (mandatory) (1 byte)
+//
+//		Input Optical Signal Level
+//			Input optical signal level: This attribute reports the current measurement of the input optical
+//			signal power of the downstream OA. Its value is a 2s complement integer referred to 1 mW (i.e.,
+//			dBm), with 0.002 dB granularity. (R) (optional) (2 bytes)
+//
+//		Lower Input Optical Threshold
+//			Lower input optical threshold: This attribute specifies the optical level the RE uses to declare
+//			the low received optical power alarm. Valid values are –127 dBm (coded as 254) to 0 dBm (coded
+//			as 0) in 0.5 dB increments. The default value 0xFF selects the RE's internal policy. (R, W)
+//			(optional) (1 byte)
+//
+//		Upper Input Optical Threshold
+//			Upper input optical threshold: This attribute specifies the optical level the RE uses to declare
+//			the high received optical power alarm. Valid values are –127 dBm (coded as 254) to 0 dBm (coded
+//			as 0) in 0.5 dB increments. The default value 0xFF selects the RE's internal policy. (R, W)
+//			(optional) (1 byte)
+//
+//		Output Optical Signal Level
+//			Output optical signal level: This attribute reports the current measurement of the mean optical
+//			launch power of the downstream OA. Its value is a 2s complement integer referred to 1 mW (i.e.,
+//			dBm), with 0.002 dB granularity. (R) (optional) (2 bytes)
+//
+//		Lower Output Optical Threshold
+//			Lower output optical threshold: This attribute specifies the minimum mean optical launch power
+//			that the RE uses to declare the low transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value 0x7F selects
+//			the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		Upper Output Optical Threshold
+//			Upper output optical threshold: This attribute specifies the maximum mean optical launch power
+//			that the RE uses to declare the high transmit optical power alarm. Its value is a 2s complement
+//			integer referred to 1 mW (i.e., dBm), with 0.5 dB granularity. The default value 0x7F selects
+//			the RE's internal policy. (R, W) (optional) (1 byte)
+//
+//		R'S' Splitter Coupling Ratio
+//			R'S' splitter coupling ratio: This attribute reports the coupling ratio of the splitter at the
+//			R'/S' interface that connects the embedded management ONU and the amplifiers to the OTL. Valid
+//			values are 99:1 (coded as 99 decimal) to 1:99 (coded as 1 decimal), where the first value is the
+//			value encoded and is the percentage of the optical signal connected to the amplifier. The
+//			default value 0xFF indicates that there is no splitter connected to this upstream/downstream
+//			amplifier pair. (R) (optional) (1 byte)
+//
+type ReDownstreamAmplifier struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	redownstreamamplifierBME = &ManagedEntityDefinition{
+		Name:    "ReDownstreamAmplifier",
+		ClassID: 316,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+			Test,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 2),
+			3:  ByteField("Arc", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 3),
+			4:  ByteField("ArcInterval", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  ByteField("OperationalMode", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint16Field("InputOpticalSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7:  ByteField("LowerInputOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  ByteField("UpperInputOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  Uint16Field("OutputOpticalSignalLevel", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: ByteField("LowerOutputOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("UpperOutputOpticalThreshold", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("R'S'SplitterCouplingRatio", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewReDownstreamAmplifier (class ID 316) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewReDownstreamAmplifier(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(redownstreamamplifierBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/rtpperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/rtpperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..c2d2021
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/rtpperformancemonitoringhistorydata.go
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const RtpPerformanceMonitoringHistoryDataClassId ClassID = ClassID(144)
+
+var rtpperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// RtpPerformanceMonitoringHistoryData (class ID #144)
+//	This ME collects PM data related to an RTP session. Instances of this ME are created and deleted
+//	by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the PPTP POTS UNI ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP POTS UNI ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rtp Errors
+//			RTP errors:	This attribute counts RTP packet errors. (R) (mandatory) (4 bytes)
+//
+//		Packet Loss
+//			Packet loss:	This attribute represents the fraction of packets lost. This attribute is
+//			calculated at the end of the 15 min interval, and is undefined under the get current data
+//			action. The value 0 indicates no packet loss, scaling linearly to 0xFFFF FFFF to indicate 100%
+//			packet loss (zero divided by zero is defined to be zero). (R) (mandatory) (4 bytes)
+//
+//		Maximum Jitter
+//			Maximum jitter: This attribute is a high water-mark that represents the maximum jitter
+//			identified during the measured interval, expressed in RTP timestamp units. (R) (mandatory)
+//			(4 bytes)
+//
+//		Maximum Time Between Real_Time Transport Control Protocol Rtcp Packets
+//			Maximum time between real-time transport control protocol (RTCP) packets: This attribute is a
+//			high water-mark that represents the maximum time between RTCP packets during the measured
+//			interval, in milliseconds. (R) (mandatory) (4 bytes)
+//
+//		Buffer Underflows
+//			Buffer underflows: This attribute counts the number of times the reassembly buffer underflows.
+//			In the case of continuous underflow caused by a loss of IP packets, a single buffer underflow
+//			should be counted. If the IW function is implemented with multiple buffers, such as a packet
+//			level buffer and a bit level buffer, then the underflow of either buffer increments this
+//			counter. (R) (mandatory) (4 bytes)
+//
+//		Buffer Overflows
+//			Buffer overflows: This attribute counts the number of times the reassembly buffer overflows. If
+//			the IW function is implemented with multiple buffers, such as a packet level buffer and a bit
+//			level buffer, then the overflow of either buffer increments this counter. (R) (mandatory)
+//			(4 bytes)
+//
+type RtpPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	rtpperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "RtpPerformanceMonitoringHistoryData",
+		ClassID: 144,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("RtpErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("PacketLoss", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("MaximumJitter", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("MaximumTimeBetweenRealTimeTransportControlProtocolRtcpPackets", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("BufferUnderflows", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint32Field("BufferOverflows", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewRtpPerformanceMonitoringHistoryData (class ID 144) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewRtpPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(rtpperformancemonitoringhistorydataBME, params...)
+}
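The Packet Loss attribute above is a scaled fraction rather than a raw count: 0 means no loss and 0xFFFFFFFF means 100% loss. A short sketch of converting the 4 byte value into a percentage follows; the helper is illustrative only and not part of the generated code.

package main

import "fmt"

// packetLossPercent converts the RTP PM history data Packet Loss value, which
// scales linearly from 0 (no loss) to 0xFFFFFFFF (100% loss), into a
// percentage.
func packetLossPercent(raw uint32) float64 {
	return float64(raw) / float64(^uint32(0)) * 100.0
}

func main() {
	fmt.Printf("raw 0x00000000 -> %6.2f%%\n", packetLossPercent(0))
	fmt.Printf("raw 0x7FFFFFFF -> %6.2f%%\n", packetLossPercent(0x7FFFFFFF))
	fmt.Printf("raw 0xFFFFFFFF -> %6.2f%%\n", packetLossPercent(0xFFFFFFFF))
}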
diff --git a/vendor/github.com/cboling/omci/generated/rtpprofiledata.go b/vendor/github.com/cboling/omci/generated/rtpprofiledata.go
new file mode 100644
index 0000000..ea025bf
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/rtpprofiledata.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const RtpProfileDataClassId ClassID = ClassID(143)
+
+var rtpprofiledataBME *ManagedEntityDefinition
+
+// RtpProfileData (class ID #143)
+//	This ME configures RTP. It is conditionally required for ONUs that offer VoIP service. If a non-
+//	OMCI interface is used to manage VoIP, this ME is unnecessary.
+//
+//	An instance of this ME is created and deleted by the OLT. An RTP profile is needed for each
+//	unique set of attributes.
+//
+//	Relationships
+//		An instance of this ME may be associated with one or more VoIP media profile MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Local Port Min
+//			Local port min: This attribute defines the base UDP port that should be used by RTP for voice
+//			traffic. The recommended default is 50000. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Local Port Max
+//			Local port max: This attribute defines the highest UDP port used by RTP for voice traffic. The
+//			value must be greater than the local port minimum. The value 0 specifies that the local port
+//			maximum be equal to the local port minimum. (R, W, set-by-create) (optional) (2 bytes)
+//
+//		Dscp Mark
+//			DSCP mark:	Diffserv code point to be used for outgoing RTP packets for this profile. The
+//			recommended default value is expedited forwarding (EF) = 0x2E. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Piggyback Events
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tone Events
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Dtmf Events
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Cas Events
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Ip Host Config Pointer
+//			IP host config pointer: This optional pointer associates the bearer (voice) flow with an IP host
+//			config data or IPv6 host config data ME. If this attribute is not present or is not populated
+//			with a valid pointer value, the bearer flow uses the same IP stack that is used for signalling,
+//			indicated by the TCP/UDP pointer in the associated SIP agent or MGC config data. The default
+//			value is 0xFFFF, a null pointer. (R, W) (optional) (2 bytes)
+//
+type RtpProfileData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	rtpprofiledataBME = &ManagedEntityDefinition{
+		Name:    "RtpProfileData",
+		ClassID: 143,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("LocalPortMin", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("LocalPortMax", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+			3: ByteField("DscpMark", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: ByteField("PiggybackEvents", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: ByteField("ToneEvents", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: ByteField("DtmfEvents", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: ByteField("CasEvents", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8: Uint16Field("IpHostConfigPointer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewRtpProfileData (class ID 143) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewRtpProfileData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(rtpprofiledataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/rtppseudowireparameters.go b/vendor/github.com/cboling/omci/generated/rtppseudowireparameters.go
new file mode 100644
index 0000000..018ced4
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/rtppseudowireparameters.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const RtpPseudowireParametersClassId ClassID = ClassID(283)
+
+var rtppseudowireparametersBME *ManagedEntityDefinition
+
+// RtpPseudowireParameters (class ID #283)
+//	If a pseudowire service uses RTP, the RTP pseudowire parameters ME provides configuration
+//	information for the RTP layer. Instances of this ME are created and deleted by the OLT. The use
+//	of RTP on a pseudowire is optional, and is determined by the existence of the RTP pseudowire
+//	parameters ME.
+//
+//	Relationships
+//		An instance of the RTP pseudowire parameters ME may exist for each pseudowire TP ME, to which it
+//		is implicitly bound by a common ME ID.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the pseudowire TP ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Clock Reference
+//			Clock reference: This attribute specifies the frequency of the common timing reference, in
+//			multiples of 8 kHz. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Rtp Timestamp Mode
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Ptype
+//			PTYPE:	This attribute specifies the RTP payload type in the TDM to the PSN direction. It
+//			comprises two 1 byte values. The first is for the payload channel, the second, for the optional
+//			separate signalling channel. Assignable PTYPEs lie in the dynamic range 96..127. If signalling
+//			is not transported in its own channel, the second value should be set to 0. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Ssrc
+//			SSRC:	This attribute specifies the RTP synchronization source in the TDM to the PSN direction.
+//			It comprises two 4 byte values. The first is for the payload channel, the second, for the
+//			optional separate signalling channel. If signalling is not transported in its own channel, the
+//			second value should be set to 0. (R, W, setbycreate) (mandatory) (8 bytes)
+//
+//		Expected Ptype
+//			Expected PTYPE: This attribute specifies the RTP payload type in the PSN to the TDM direction.
+//			The received payload type may be used to detect malformed packets. It comprises two 1 byte
+//			values. The first is for the payload channel, the second, for the optional separate signalling
+//			channel. To disable either or both of the check functions, set the corresponding value to its
+//			default value 0. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Expected Ssrc
+//			Expected SSRC: This attribute specifies the RTP synchronization source in the PSN to the TDM
+//			direction. The received SSRC may be used to detect misconnection (stray packets). It comprises
+//			two 4 byte values. The first is for the payload channel, the second, for the optional separate
+//			signalling channel. To disable either or both of the check functions, set the corresponding
+//			value to its default value 0. (R, W, setbycreate) (optional) (8 bytes)
+//
+type RtpPseudowireParameters struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	rtppseudowireparametersBME = &ManagedEntityDefinition{
+		Name:    "RtpPseudowireParameters",
+		ClassID: 283,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("ClockReference", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("RtpTimestampMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("Ptype", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint64Field("Ssrc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: Uint16Field("ExpectedPtype", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6: Uint64Field("ExpectedSsrc", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 6),
+		},
+	}
+}
+
+// NewRtpPseudowireParameters (class ID 283) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewRtpPseudowireParameters(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(rtppseudowireparametersBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/sipagentconfigdata.go b/vendor/github.com/cboling/omci/generated/sipagentconfigdata.go
new file mode 100644
index 0000000..a92e8b9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/sipagentconfigdata.go
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SipAgentConfigDataClassId ClassID = ClassID(150)
+
+var sipagentconfigdataBME *ManagedEntityDefinition
+
+// SipAgentConfigData (class ID #150)
+//	The SIP agent config data ME models a SIP signalling agent. It defines the configuration
+//	necessary to establish communication for signalling between the SIP user agent (UA) and a SIP
+//	server.
+//
+//	NOTE 1 – If a non-OMCI interface is used to manage SIP for VoIP, this ME is unnecessary. The
+//	non-OMCI interface supplies the necessary data, which may be read back to the OLT via the SIP
+//	config portal ME.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME serves one or more SIP user data MEs and points to a TCP/UDP config data
+//		that carries signalling messages. Other pointers establish additional agent parameters such as
+//		proxy servers.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Proxy Server Address Pointer
+//			Proxy server address pointer: This attribute points to a large string ME that contains the name
+//			(IP address or URI) of the SIP proxy server for SIP signalling messages. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Outbound Proxy Address Pointer
+//			Outbound proxy address pointer: An outbound SIP proxy may or may not be required within a given
+//			network. If an outbound SIP proxy is used, the outbound proxy address pointer attribute must be
+//			set to point to a valid large string ME that contains the name (IP address or URI) of the
+//			outbound proxy server for SIP signalling messages. If an outbound SIP proxy is not used, the
+//			outbound proxy address pointer attribute must be set to a null pointer. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Primary Sip Dns
+//			Primary SIP DNS: This attribute specifies the primary SIP DNS IP address. If the value of this
+//			attribute is 0, the primary DNS server is defined in the corresponding IP host config data or
+//			IPv6 host config data ME. If the value is non-zero, it takes precedence over the primary DNS
+//			server defined in the IP host config data or IPv6 host config data ME. (R, W, set-by-create)
+//			(mandatory) (4 bytes)
+//
+//		Secondary Sip Dns
+//			Secondary SIP DNS: This attribute specifies the secondary SIP DNS IP address. If the value of
+//			this attribute is 0, the secondary DNS server is defined in the corresponding IP host config
+//			data or IPv6 host config data ME. If the value is non-zero, it takes precedence over the
+//			secondary DNS server defined in the IP host config data or IPv6 host config data ME. (R, W, set-
+//			by-create) (mandatory) (4 bytes)
+//
+//		Tcp_Udp Pointer
+//			TCP/UDP pointer: This pointer associates the SIP agent with the TCP/UDP config data ME to be
+//			used for communication with the SIP server. The default value is 0xFFFF, a null pointer. (R, W)
+//			(mandatory) (2 bytes)
+//
+//		Sip Reg Exp Time
+//			SIP reg exp time: This attribute specifies the SIP registration expiration time in seconds. If
+//			its value is 0, the SIP agent does not add an expiration time to the registration requests and
+//			does not perform reregistration. The default value is 3600 s. (R, W) (mandatory) (4 bytes)
+//
+//		Sip Rereg Head Start Time
+//			SIP rereg head start time: This attribute specifies the time in seconds prior to timeout that
+//			causes the SIP agent to start the re-registration process. The default value is 360 s. (R, W)
+//			(mandatory) (4 bytes)
+//
+//		Host Part Uri
+//			Host part URI: This attribute points to a large string ME that contains the host or domain part
+//			of the SIP address of record for users connected to this ONU. A null pointer indicates that the
+//			current address in the IP host config ME is to be used. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Sip Status
+//			(R) (mandatory) (1 byte)
+//
+//		Sip Registrar
+//			SIP registrar: This attribute points to a network address ME that contains the name (IP address
+//			or resolved name) of the registrar server for SIP signalling messages. Examples: "10.10.10.10"
+//			and "proxy.voip.net". (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Softswitch
+//			Softswitch:	This attribute identifies the SIP gateway softswitch vendor. The format is four
+//			ASCII coded alphabetic characters [A..Z] as defined in [ATIS0300220]. A value of four null bytes
+//			indicates an unknown or unspecified vendor. (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Sip Response Table
+//			NOTE 2 – This model assumes that SIP response tones and text are common to all POTS lines that
+//			share a given SIP agent.
+//
+//		Sip Option Transmit Control
+//			SIP option transmit control: This Boolean attribute specifies that the ONU is (true) or is not
+//			(false) enabled to transmit SIP options. The default value is recommended to be false. (R, W,
+//			setbycreate) (optional) (1 byte)
+//
+//		Sip Uri Format
+//			SIP URI format: This attribute specifies the format of the URI in outgoing SIP messages. The
+//			recommended default value 0 specifies TEL URIs; the value 1 specifies SIP URIs. Other values are
+//			reserved. (R, W, setbycreate) (optional) (1 byte)
+//
+//		Redundant Sip Agent Pointer
+//			Redundant SIP agent pointer: This attribute points to another SIP agent config data ME, which is
+//			understood to provide redundancy. The initial SIP agent is determined by the pointer from the
+//			SIP user data ME. It is the manager's responsibility to provision a group of redundant SIP
+//			agents with mutually consistent attributes. (R, W, setbycreate) (optional) (2 bytes)
+//
+type SipAgentConfigData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	sipagentconfigdataBME = &ManagedEntityDefinition{
+		Name:    "SipAgentConfigData",
+		ClassID: 150,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("ProxyServerAddressPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("OutboundProxyAddressPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("PrimarySipDns", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  Uint32Field("SecondarySipDns", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("TcpUdpPointer", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  Uint32Field("SipRegExpTime", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  Uint32Field("SipReregHeadStartTime", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  Uint16Field("HostPartUri", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("SipStatus", 0, mapset.NewSetWith(Read), true, false, false, false, 9),
+			10: Uint16Field("SipRegistrar", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: Uint32Field("Softswitch", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 11),
+			12: MultiByteField("SipResponseTable", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: ByteField("SipOptionTransmitControl", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 13),
+			14: ByteField("SipUriFormat", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 14),
+			15: Uint16Field("RedundantSipAgentPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewSipAgentConfigData (class ID 150) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or about to be sent on the wire.
+func NewSipAgentConfigData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(sipagentconfigdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/sipagentperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/sipagentperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..2d59f4e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/sipagentperformancemonitoringhistorydata.go
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SipAgentPerformanceMonitoringHistoryDataClassId ClassID = ClassID(151)
+
+var sipagentperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// SipAgentPerformanceMonitoringHistoryData (class ID #151)
+//	This ME collects PM data for the associated VoIP SIP agent. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with a SIP agent config data or SIP config portal object.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the corresponding SIP agent config
+//			data or to the SIP config portal. If a non-OMCI configuration method is used for VoIP, there can
+//			be only one live ME instance, associated with the SIP config portal, and with ME ID 0. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Transactions
+//			Transactions: This attribute counts the number of new transactions that were initiated. (R)
+//			(optional) (4 bytes)
+//
+//		Rx Invite Reqs
+//			Rx invite reqs: This attribute counts received invite messages, including retransmissions. (R)
+//			(optional) (4 bytes)
+//
+//		Rx Invite Retrans
+//			Rx invite retrans: This attribute counts received invite retransmission messages. (R) (optional)
+//			(4 bytes)
+//
+//		Rx Noninvite Reqs
+//			Rx noninvite reqs: This attribute counts received non-invite messages, including
+//			retransmissions. (R) (optional) (4 bytes)
+//
+//		Rx Noninvite Retrans
+//			Rx noninvite retrans: This attribute counts received non-invite retransmission messages. (R)
+//			(optional) (4 bytes)
+//
+//		Rx Response
+//			Rx response:	This attribute counts total responses received. (R) (optional) (4 bytes)
+//
+//		Rx Response Retransmissions
+//			Rx response retransmissions: This attribute counts total response retransmissions received. (R)
+//			(optional) (4 bytes)
+//
+//		Tx Invite Reqs
+//			Tx invite reqs: This attribute counts transmitted invite messages, including retransmissions.
+//			(R) (optional) (4 bytes)
+//
+//		Tx Invite Retrans
+//			Tx invite retrans: This attribute counts transmitted invite retransmission messages. (R)
+//			(optional) (4 bytes)
+//
+//		Tx Noninvite Reqs
+//			Tx noninvite reqs: This attribute counts transmitted non-invite messages, including
+//			retransmissions. (R) (optional) (4 bytes)
+//
+//		Tx Noninvite Retrans
+//			Tx noninvite retrans: This attribute counts transmitted non-invite retransmission messages. (R)
+//			(optional) (4 bytes)
+//
+//		Tx Response
+//			Tx response: This attribute counts the total responses sent. (R) (optional) (4 bytes)
+//
+//		Tx Response Retransmissions
+//			Tx response retransmissions: This attribute counts total response retransmissions sent. (R)
+//			(optional) (4 bytes)
+//
+type SipAgentPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	sipagentperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "SipAgentPerformanceMonitoringHistoryData",
+		ClassID: 151,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("Transactions", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  Uint32Field("RxInviteReqs", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5:  Uint32Field("RxInviteRetrans", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  Uint32Field("RxNoninviteReqs", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7:  Uint32Field("RxNoninviteRetrans", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8:  Uint32Field("RxResponse", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint32Field("RxResponseRetransmissions", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: Uint32Field("TxInviteReqs", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint32Field("TxInviteRetrans", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: Uint32Field("TxNoninviteReqs", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+			13: Uint32Field("TxNoninviteRetrans", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint32Field("TxResponse", 0, mapset.NewSetWith(Read), false, false, true, false, 14),
+			15: Uint32Field("TxResponseRetransmissions", 0, mapset.NewSetWith(Read), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewSipAgentPerformanceMonitoringHistoryData (class ID 151) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewSipAgentPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(sipagentperformancemonitoringhistorydataBME, params...)
+}
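All of the generated files in this change follow the same shape: an init()-registered ManagedEntityDefinition plus a New* constructor that delegates to NewManagedEntity. A minimal usage sketch, assuming only the API visible in this diff (how OmciErrors reports success or failure is not shown here, so the final print is purely illustrative):

	package main

	import (
		"fmt"

		"github.com/cboling/omci/generated"
	)

	func main() {
		// No ParamData arguments: build the definition with its default attribute values.
		entity, omciErr := generated.NewSipAgentPerformanceMonitoringHistoryData()
		fmt.Println(entity, omciErr)
	}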
diff --git a/vendor/github.com/cboling/omci/generated/sipcallinitiationperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/sipcallinitiationperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..5abdc6a
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/sipcallinitiationperformancemonitoringhistorydata.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SipCallInitiationPerformanceMonitoringHistoryDataClassId ClassID = ClassID(152)
+
+var sipcallinitiationperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// SipCallInitiationPerformanceMonitoringHistoryData (class ID #152)
+//	This ME collects PM data related to call initiations of a VoIP SIP agent. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the SIP agent config data or SIP config
+//		portal ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the SIP agent config data or the
+//			SIP config portal ME. If a non-OMCI configuration method is used for VoIP, there can be only one
+//			live ME instance, associated with the SIP config portal, and with ME ID 0. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Failed To Connect Counter
+//			Failed to connect counter: This attribute counts the number of times that the SIP UA failed to
+//			reach/connect its TCP/UDP peer during SIP call initiations. (R) (mandatory) (4 bytes)
+//
+//		Failed To Validate Counter
+//			Failed to validate counter: This attribute counts the number of times that the SIP UA failed to
+//			validate its peer during SIP call initiations. (R) (mandatory) (4 bytes)
+//
+//		Timeout Counter
+//			Timeout counter: This attribute counts the number of times that the SIP UA timed out during SIP
+//			call initiations. (R) (mandatory) (4 bytes)
+//
+//		Failure Received Counter
+//			Failure received counter: This attribute counts the number of times that the SIP UA received a
+//			failure error code during SIP call initiations. (R) (mandatory) (4 bytes)
+//
+//		Failed To Authenticate Counter
+//			Failed to authenticate counter: This attribute counts the number of times that the SIP UA failed
+//			to authenticate itself during SIP call initiations. (R) (mandatory) (4 bytes)
+//
+type SipCallInitiationPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	sipcallinitiationperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "SipCallInitiationPerformanceMonitoringHistoryData",
+		ClassID: 152,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("FailedToConnectCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("FailedToValidateCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("TimeoutCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("FailureReceivedCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("FailedToAuthenticateCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewSipCallInitiationPerformanceMonitoringHistoryData (class ID 152) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewSipCallInitiationPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(sipcallinitiationperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/sipuserdata.go b/vendor/github.com/cboling/omci/generated/sipuserdata.go
new file mode 100644
index 0000000..f87ec4f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/sipuserdata.go
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SipUserDataClassId ClassID = ClassID(153)
+
+var sipuserdataBME *ManagedEntityDefinition
+
+// SipUserData (class ID #153)
+//	The SIP user data defines the user specific configuration attributes associated with a specific
+//	VoIP CTP. This entity is conditionally required for ONUs that offer VoIP SIP services. If a non-
+//	OMCI interface is used to manage SIP for VoIP, this ME is unnecessary. The non-OMCI interface
+//	supplies the necessary data, which may be read back to the OLT via the SIP config portal ME.
+//
+//	An instance of this ME is created and deleted by the OLT. A SIP user data instance is required
+//	for each POTS UNI port using SIP protocol and configured by the OMCI.
+//
+//	Relationships
+//		An instance of this ME is associated with one VoIP voice CTP ME and a PPTP POTS UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Sip Agent Pointer
+//			SIP agent pointer: This attribute points to the SIP agent config data ME to be used for
+//			signalling. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		User Part Aor
+//			User part AOR: This attribute points to a large string that contains the user identification
+//			part of the address of record. This can take the form of an alphanumeric string or the
+//			subscriber's directory number. A null pointer indicates the absence of an AOR. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Sip Display Name
+//			SIP display name: This ASCII string attribute defines the customer ID used for the display
+//			attribute in outgoing SIP messages. The default value is null (all zero bytes). (R, W)
+//			(mandatory) (25 bytes)
+//
+//		Username And Password
+//			Username and password: This attribute points to an authentication security method ME that
+//			contains the SIP user name and password used for authentication. A null pointer indicates no
+//			username and password. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Voicemail Server Sip Uri
+//			Voicemail server SIP URI: This attribute points to a network address ME that contains the name
+//			(IP address or URI) of the SIP voicemail server for SIP signalling messages. A null pointer
+//			indicates the absence of a SIP voicemail server. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Voicemail Subscription Expiration Time
+//			Voicemail subscription expiration time: This attribute defines the voicemail subscription
+//			expiration time in seconds. If this value is 0, the SIP agent uses an implementation-specific
+//			value. This attribute is recommended to be set to 3600 s by default. (R, W, setbycreate)
+//			(mandatory) (4 bytes)
+//
+//		Network Dial Plan Pointer
+//			Network dial plan pointer: This attribute points to a network dial plan table. A null pointer
+//			indicates the absence of a network dial plan. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Application Services Profile Pointer
+//			Application services profile pointer: This attribute points to a VoIP application services
+//			profile. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Feature Code Pointer
+//			Feature code pointer: This attribute points to the VoIP feature access codes ME for this
+//			subscriber. A null pointer indicates the absence of a VoIP feature access codes ME. (R, W, set-
+//			by-create) (mandatory) (2 bytes)
+//
+//		Pptp Pointer
+//			PPTP pointer: This attribute points to the PPTP POTS UNI ME that provides the analogue telephony
+//			adaptor (ATA) function. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Release Timer
+//			Release timer: This attribute contains a release timer defined in seconds. The value 0 specifies
+//			that the ONU is to use its internal default. The default value of this attribute is 10 s. (R, W)
+//			(optional) (1 byte)
+//
+//		Receiver Off Hook Roh Timer
+//			Receiver off hook (ROH) timer:	This attribute defines the time in seconds for the ROH condition
+//			before ROH tone is applied. The value 0 disables ROH timing. The value 0xFF specifies that the
+//			ONU is to use its internal default, which may or may not be the same as the 15 s OMCI default
+//			value. (R, W) (optional) (1 byte)
+//
+type SipUserData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	sipuserdataBME = &ManagedEntityDefinition{
+		Name:    "SipUserData",
+		ClassID: 153,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("SipAgentPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("UserPartAor", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  MultiByteField("SipDisplayName", 25, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  Uint16Field("UsernameAndPassword", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("VoicemailServerSipUri", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint32Field("VoicemailSubscriptionExpirationTime", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  Uint16Field("NetworkDialPlanPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  Uint16Field("ApplicationServicesProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  Uint16Field("FeatureCodePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: Uint16Field("PptpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: ByteField("ReleaseTimer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("ReceiverOffHookRohTimer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewSipUserData (class ID 153) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewSipUserData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(sipuserdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/snmpconfigurationdata.go b/vendor/github.com/cboling/omci/generated/snmpconfigurationdata.go
new file mode 100644
index 0000000..9c4b339
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/snmpconfigurationdata.go
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SnmpConfigurationDataClassId ClassID = ClassID(335)
+
+var snmpconfigurationdataBME *ManagedEntityDefinition
+
+// SnmpConfigurationData (class ID #335)
+//	The SNMP configuration data ME provides a way for the OLT to provision an IP path for an SNMP
+//	management agent.
+//
+//	The SNMP configuration data ME is created and deleted by the OLT.
+//
+//	Relationships
+//		One instance of this ME is created by the OLT for each SNMP management path termination.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The ME IDs 0 and
+//			0xFFFF are reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Snmp Version
+//			SNMP version: This integer attribute is the SNMP protocol version to be supported. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Snmp Agent Address
+//			SNMP agent address: This attribute is a pointer to a TCP/UDP config data ME, which provides the
+//			SNMP agent. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Snmp Server Address
+//			SNMP server address: This attribute is the IP address of the SNMP server. (R, W, setbycreate)
+//			(mandatory) (4 bytes)
+//
+//		Snmp Server Port
+//			SNMP server port: This attribute is the UDP port number of the SNMP server. (R, W, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Security Name Pointer
+//			Security name pointer: This attribute points to a large string whose content represents the SNMP
+//			security name in a human-readable format that is independent of the security model. SecurityName
+//			is defined in [b-IETF RFC 2571]. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Community For Read
+//			Community for read: This attribute is a pointer to a large string that contains the name of the
+//			read community. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Community For Write
+//			Community for write: This attribute is a pointer to a large string that contains the name of the
+//			write community. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Sys Name Pointer
+//			Sys name pointer: This attribute points to a large string whose content identifies the SNMP
+//			system name. SysName is defined in [b-IETF RFC 3418]. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+type SnmpConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	snmpconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "SnmpConfigurationData",
+		ClassID: 335,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("SnmpVersion", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("SnmpAgentAddress", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("SnmpServerAddress", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("SnmpServerPort", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: Uint16Field("SecurityNamePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: Uint16Field("CommunityForRead", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: Uint16Field("CommunityForWrite", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8: Uint16Field("SysNamePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewSnmpConfigurationData (class ID 335) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewSnmpConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(snmpconfigurationdataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/softwareimage.go b/vendor/github.com/cboling/omci/generated/softwareimage.go
new file mode 100644
index 0000000..ff90b00
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/softwareimage.go
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const SoftwareImageClassId ClassID = ClassID(7)
+
+var softwareimageBME *ManagedEntityDefinition
+
+// SoftwareImage (class ID #7)
+//	This ME models an executable software image stored in the ONU (documented here as its
+//	fundamental usage). It may also be used to represent an opaque vendor-specific file
+//	(vendorspecific usage).
+//
+//	Fundamental usage
+//
+//	The ONU automatically creates two instances of this ME upon the creation of each ME that
+//	contains independently manageable software, either the ONU itself or an individual circuit pack.
+//	It populates ME attributes according to data within the ONU or the circuit pack.
+//
+//	Some pluggable equipment may not contain software. Others may contain software that is
+//	intrinsically bound to the ONU's own software image. No software image ME need exist for such
+//	equipment, though it may be convenient for the ONU to create them to support software version
+//	audit from the OLT. In this case, the dependent MEs would support only the get action.
+//
+//	A slot may contain various equipment over its lifetime, and if software image MEs exist, the ONU
+//	must automatically create and delete them as the equipped configuration changes. The identity of
+//	the software image is tied to the cardholder.
+//
+//	When ONU controller packs are duplicated, each can be expected to contain two software image
+//	MEs, managed through reference to the individual controller packs themselves. When this occurs,
+//	the ONU should not have a global pair of software image MEs (instance 0), since an action
+//	(download, activate, commit) directed to instance 0 would be ambiguous.
+//
+//	Relationships
+//		Two instances of the software image ME are associated with each instance of the ONU or
+//		cardholder whose software is independently managed.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The first byte
+//			indicates the physical location of the equipment hosting the software image, either the ONU (0)
+//			or a cardholder (1..254). The second byte distinguishes between the two software image ME
+//			instances (0..1). (R) (mandatory) (2 bytes)
+//
+//		Version
+//			Version:	This string attribute identifies the version of the software. (R) (mandatory)
+//			(14 bytes)
+//
+//		Is Committed
+//			Is committed: This attribute indicates whether the associated software image is committed (1) or
+//			uncommitted (0). By definition, the committed software image is loaded and executed upon reboot
+//			of the ONU or circuit pack. During normal operation, one software image is always committed,
+//			while the other is uncommitted. Under no circumstances are both software images allowed to be
+//			committed at the same time. On the other hand, both software images could be uncommitted at the
+//			same time if both were invalid. Upon ME instantiation, instance 0 is initialized to committed,
+//			while instance 1 is initialized to uncommitted (i.e., the ONU ships from the factory with image
+//			0 committed). (R) (mandatory) (1 byte)
+//
+//		Is Active
+//			Is active:	This attribute indicates whether the associated software image is active (1) or
+//			inactive (0). By definition, the active software image is one that is currently loaded and
+//			executing in the ONU or circuit pack. Under normal operation, one software image is always
+//			active while the other is inactive. Under no circumstances are both software images allowed to
+//			be active at the same time. On the other hand, both software images could be inactive at the
+//			same time if both were invalid. (R) (mandatory) (1 byte)
+//
+//		Is Valid
+//			Is valid:	This attribute indicates whether the associated software image is valid (1) or invalid
+//			(0). By definition, a software image is valid if it has been verified to be an executable code
+//			image. The verification mechanism is not subject to standardization; however, it should include
+//			at least a data integrity check [e.g., a cyclic redundancy check (CRC)] of the entire code
+//			image. Upon ME instantiation or software download completion, the ONU validates the associated
+//			code image and sets this attribute according to the result. (R) (mandatory) (1 byte)
+//
+//		Product Code
+//			Product code:	This attribute provides a way for a vendor to indicate product code information on
+//			a file. It is a character string, padded with trailing nulls if it is shorter than 25 bytes. (R)
+//			(optional) (25 bytes)
+//
+//		Image Hash
+//			Image hash:	This attribute is an MD5 hash of the software image. It is computed at completion of
+//			the end download action. (R) (optional) (16 bytes)
+//
+type SoftwareImage struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	softwareimageBME = &ManagedEntityDefinition{
+		Name:    "SoftwareImage",
+		ClassID: 7,
+		MessageTypes: mapset.NewSetWith(
+			ActivateSoftware,
+			CommitSoftware,
+			DownloadSection,
+			EndSoftwareDownload,
+			Get,
+			StartSoftwareDownload,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: MultiByteField("Version", 14, nil, mapset.NewSetWith(Read), true, false, false, false, 1),
+			2: ByteField("IsCommitted", 0, mapset.NewSetWith(Read), true, false, false, false, 2),
+			3: ByteField("IsActive", 0, mapset.NewSetWith(Read), true, false, false, false, 3),
+			4: ByteField("IsValid", 0, mapset.NewSetWith(Read), true, false, false, false, 4),
+			5: MultiByteField("ProductCode", 25, nil, mapset.NewSetWith(Read), true, false, true, false, 5),
+			6: MultiByteField("ImageHash", 16, nil, mapset.NewSetWith(Read), true, false, true, false, 6),
+		},
+	}
+}
+
+// NewSoftwareImage (class ID 7) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewSoftwareImage(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(softwareimageBME, params...)
+}
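The software image ME ID layout described above (first byte: 0 for the ONU itself or 1..254 for a cardholder; second byte: image instance 0 or 1) can be packed and unpacked with plain bit operations. A sketch only; these helpers are not part of the generated package:

	// softwareImageEntityID packs the slot (0 = ONU, 1..254 = cardholder) and the
	// image instance (0 or 1) into the 2-byte ME ID described in the comments above.
	func softwareImageEntityID(slot, instance uint8) uint16 {
		return uint16(slot)<<8 | uint16(instance)
	}

	// splitSoftwareImageEntityID is the inverse operation.
	func splitSoftwareImageEntityID(entityID uint16) (slot, instance uint8) {
		return uint8(entityID >> 8), uint8(entityID & 0x00FF)
	}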
diff --git a/vendor/github.com/cboling/omci/generated/t-cont.go b/vendor/github.com/cboling/omci/generated/t-cont.go
new file mode 100644
index 0000000..c0cc82b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/t-cont.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TContClassId ClassID = ClassID(262)
+
+var tcontBME *ManagedEntityDefinition
+
+// TCont (class ID #262)
+//	An instance of the traffic container ME T-CONT represents a logical connection group associated
+//	with a G-PON PLOAM layer alloc-ID. A T-CONT can accommodate GEM packets in priority queues or
+//	traffic schedulers that exist in the GEM layer.
+//
+//	The ONU autonomously creates instances of this ME. The OLT can discover the number of TCONT
+//	instances via the ANI-G ME. When the ONU's MIB is reset or created for the first time, all
+//	supported T-CONTs are created. The OLT provisions alloc-IDs to the ONU via the PLOAM channel.
+//	Via the OMCI, the OLT must then set the alloc-ID attributes in the T-CONTs that it wants to
+//	activate for user traffic, to create the appropriate association with the allocation ID in the
+//	PLOAM channel. There should be a one-to-one relationship between allocation IDs and T-CONT MEs;
+//	the connection of multiple T-CONTs to a single allocation ID is undefined.
+//
+//	The allocation ID that matches the ONU-ID itself is defined to be the default alloc-ID. This
+//	alloc-ID is used to carry the OMCC. The default alloc-ID can also be used to carry user traffic,
+//	and hence can be assigned to one of the T-CONT MEs. However, this OMCI relationship only
+//	pertains to user traffic, and the OMCC relationship is unaffected. It can also be true that the
+//	OMCC is not contained in any T-CONT ME construct; rather, that the OMCC remains outside of the
+//	OMCI, and that the OMCI is not used to manage the OMCC in any way. Multiplexing of the OMCC and
+//	user data in GPON systems is discussed in clause B.2.4.
+//
+//	Relationships
+//		One or more instances of this ME are associated with an instance of a circuit pack that supports
+//		a PON interface function, or with the ONU-G itself.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical capability that realizes the TCONT. It may be represented as
+//			0xSSBB, where SS indicates the slot ID that contains this T-CONT (0 for the ONU as a whole), and
+//			BB is the TCONT ID, numbered by the ONU itself. T-CONTs are numbered in ascending order, with
+//			the range 0..255 in each slot. (R) (mandatory) (2 bytes)
+//
+//		Alloc_Id
+//			Alloc-ID:	This attribute links the T-CONT with the alloc-ID assigned by the OLT in the
+//			assign_alloc-ID PLOAM message. The respective TC layer specification should be referenced for
+//			the legal values for that system. Prior to the setting of this attribute by the OLT, this
+//			attribute has an unambiguously unusable initial value, namely the value 0x00FF or 0xFFFF for
+//			ITU-T G.984 systems, and the value 0xFFFF for all other ITU-T GTC based PON systems. (R, W)
+//			(mandatory) (2 bytes)
+//
+//		Deprecated
+//			Deprecated:	The ONU should set this attribute to the value 1, and the OLT should ignore it. (R)
+//			(mandatory) (1 byte)
+//
+//		Policy
+//			NOTE – This attribute is read-only, unless otherwise specified by the QoS configuration
+//			flexibility attribute of the ONU2-G ME. If flexible configuration is not supported, the ONU
+//			should reject an attempt to set it with a parameter error result-reason code.
+//
+type TCont struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	tcontBME = &ManagedEntityDefinition{
+		Name:    "TCont",
+		ClassID: 262,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("AllocId", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: ByteField("Deprecated", 0, mapset.NewSetWith(Read), false, false, false, true, 2),
+			3: ByteField("Policy", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewTCont (class ID 262) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTCont(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(tcontBME, params...)
+}
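Per the Alloc-ID description above, the attribute holds a deliberately unusable placeholder until the OLT assigns a real allocation ID. A small check of that condition, based only on the values stated in the comment (0x00FF or 0xFFFF for ITU-T G.984 systems, 0xFFFF otherwise); this helper is not part of the generated package:

	// allocIDAssigned reports whether a T-CONT Alloc-ID looks like a real OLT
	// assignment rather than one of the initial placeholder values noted above.
	func allocIDAssigned(allocID uint16, isG984System bool) bool {
		if allocID == 0xFFFF {
			return false
		}
		if isG984System && allocID == 0x00FF {
			return false
		}
		return true
	}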
diff --git a/vendor/github.com/cboling/omci/generated/tcadaptorperformancemonitoringhistorydataxdsl.go b/vendor/github.com/cboling/omci/generated/tcadaptorperformancemonitoringhistorydataxdsl.go
new file mode 100644
index 0000000..feb1f0c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/tcadaptorperformancemonitoringhistorydataxdsl.go
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TcAdaptorPerformanceMonitoringHistoryDataXdslClassId ClassID = ClassID(116)
+
+var tcadaptorperformancemonitoringhistorydataxdslBME *ManagedEntityDefinition
+
+// TcAdaptorPerformanceMonitoringHistoryDataXdsl (class ID #116)
+//	This ME collects PM data of an xTUC to xTUR ATM data path. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID:	This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Near_End Hec Violation Count
+//			Near-end HEC violation count: This attribute counts near-end HEC anomalies in the ATM data path.
+//			(R) (mandatory) (2 bytes)
+//
+//		Near_End Delineated Total Cell Count Cd P
+//			Near-end delineated total cell count (CDP): This attribute counts the total number of cells
+//			passed through the cell delineation and HEC function process operating on the ATM data path
+//			while in the SYNC state. (R) (mandatory) (4 bytes)
+//
+//		Near_End User Total Cell Count Cu_P
+//			Near-end user total cell count(CU-P): This attribute counts the total number of cells in the ATM
+//			data path delivered at the V-C interface. (R) (mandatory) (4 bytes)
+//
+//		Near_End Idle Cell Bit Error Count
+//			Near-end idle cell bit error count: This attribute counts cells with bit errors in the ATM data
+//			path idle payload received at the near end. (R) (mandatory) (2 bytes)
+//
+//		Far_End Hec Violation Count
+//			Far-end HEC violation count: This attribute counts far-end HEC anomalies in the ATM data path.
+//			(R) (mandatory) (2 bytes)
+//
+//		Far_End Delineated Total Cell Count Cd_Pfe
+//			Far-end delineated total cell count (CD-PFE): This attribute counts the total number of cells
+//			passed through the cell delineation process and HEC function operating on the ATM data path
+//			while in the SYNC state. (R) (mandatory) (4 bytes)
+//
+//		Far_End User Total Cell Count Cu_Pfe
+//			Far-end user total cell count (CU-PFE): This attribute counts the total number of cells in the
+//			ATM data path delivered at the T-R interface. (R) (mandatory) (4 bytes)
+//
+//		Far_End Idle Cell Bit Error Count
+//			Far-end idle cell bit error count: This attribute counts cells with bit errors in the ATM data
+//			path idle payload received at the far end. (R) (mandatory) (2 bytes)
+//
+type TcAdaptorPerformanceMonitoringHistoryDataXdsl struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	tcadaptorperformancemonitoringhistorydataxdslBME = &ManagedEntityDefinition{
+		Name:    "TcAdaptorPerformanceMonitoringHistoryDataXdsl",
+		ClassID: 116,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("NearEndHecViolationCount", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("NearEndDelineatedTotalCellCountCdP", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("NearEndUserTotalCellCountCuP", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint16Field("NearEndIdleCellBitErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint16Field("FarEndHecViolationCount", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("FarEndDelineatedTotalCellCountCdPfe", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("FarEndUserTotalCellCountCuPfe", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint16Field("FarEndIdleCellBitErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+		},
+	}
+}
+
+// NewTcAdaptorPerformanceMonitoringHistoryDataXdsl (class ID 116) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTcAdaptorPerformanceMonitoringHistoryDataXdsl(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(tcadaptorperformancemonitoringhistorydataxdslBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/tcpudpconfigdata.go b/vendor/github.com/cboling/omci/generated/tcpudpconfigdata.go
new file mode 100644
index 0000000..76c26af
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/tcpudpconfigdata.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TcpUdpConfigDataClassId ClassID = ClassID(136)
+
+var tcpudpconfigdataBME *ManagedEntityDefinition
+
+// TcpUdpConfigData (class ID #136)
+//	The TCP/UDP config data ME configures services based on the transmission control protocol (TCP)
+//	and user datagram protocol (UDP) that are offered from an IP host. If a non-OMCI interface is
+//	used to manage an IP service, this ME is unnecessary; the non-OMCI interface supplies the
+//	necessary data.
+//
+//	An instance of this ME is created and deleted on request of the OLT.
+//
+//	Relationships
+//		One or more instances of this ME may be associated with an instance of an IP host config data or
+//		IPv6 host config data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. It is
+//			recommended that the ME ID be the same as the port number. (R, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Port Id
+//			Port ID:	This attribute specifies the port number that offers the TCP/UDP service. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Protocol
+//			Protocol:	This attribute specifies the protocol type as defined by [b-IANA] (protocol numbers),
+//			for example UDP (0x11). (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Tos_Diffserv Field
+//			TOS/diffserv field: This attribute specifies the value of the TOS/diffserv field of the IPv4
+//			header. The contents of this attribute may contain the type of service per [IETF RFC 2474] or a
+//			DSCP. Valid values for DSCP are as defined by [b-IANA] (differentiated services field code
+//			points). (R, W, set-by-create) (mandatory) (1 byte)
+//
+//		Ip Host Pointer
+//			IP host pointer: This attribute points to the IP host config data or IPv6 host config data ME
+//			associated with this TCP/UDP data. Any number of ports and protocols may be associated with an
+//			IP host. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+type TcpUdpConfigData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	tcpudpconfigdataBME = &ManagedEntityDefinition{
+		Name:    "TcpUdpConfigData",
+		ClassID: 136,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("PortId", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("Protocol", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("TosDiffservField", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("IpHostPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+		},
+	}
+}
+
+// NewTcpUdpConfigData (class ID 136) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTcpUdpConfigData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(tcpudpconfigdataBME, params...)
+}
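The Protocol attribute above carries an IANA protocol number in a single byte, with UDP (0x11) given as the example. Two such constants for illustration only; the generated definition itself just stores the raw byte:

	const (
		protocolTCP byte = 0x06 // IANA protocol number for TCP
		protocolUDP byte = 0x11 // IANA protocol number for UDP, as in the example above
	)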
diff --git a/vendor/github.com/cboling/omci/generated/tcpudpperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/tcpudpperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..890b090
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/tcpudpperformancemonitoringhistorydata.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TcpUdpPerformanceMonitoringHistoryDataClassId ClassID = ClassID(342)
+
+var tcpudpperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// TcpUdpPerformanceMonitoringHistoryData (class ID #342)
+//	This ME collects PM data related to a TCP or UDP port. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the TCP/UDP config data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TCP/UDP config data ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Socket Failed
+//			Socket failed:	This attribute is incremented when an attempt to create a socket associated with
+//			a port fails. (R) (mandatory) (2 bytes)
+//
+//		Listen Failed
+//			Listen failed:	This attribute is incremented when an attempt by a service to listen for a
+//			request on a port fails. (R) (mandatory) (2 bytes)
+//
+//		Bind Failed
+//			Bind failed:	This attribute is incremented when an attempt by a service to bind to a port fails.
+//			(R) (mandatory) (2 bytes)
+//
+//		Accept Failed
+//			Accept failed: This attribute is incremented when an attempt to accept a connection on a port
+//			fails. (R) (mandatory) (2 bytes)
+//
+//		Select Failed
+//			Select failed:	This attribute is incremented when an attempt to perform a select on a group of
+//			ports fails. (R) (mandatory) (2 bytes)
+//
+type TcpUdpPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	tcpudpperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "TcpUdpPerformanceMonitoringHistoryData",
+		ClassID: 342,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("SocketFailed", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint16Field("ListenFailed", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint16Field("BindFailed", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint16Field("AcceptFailed", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint16Field("SelectFailed", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewTcpUdpPerformanceMonitoringHistoryData (class ID 342) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTcpUdpPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(tcpudpperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/thresholddata1.go b/vendor/github.com/cboling/omci/generated/thresholddata1.go
new file mode 100644
index 0000000..a83e671
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/thresholddata1.go
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ThresholdData1ClassId ClassID = ClassID(273)
+
+var thresholddata1BME *ManagedEntityDefinition
+
+// ThresholdData1 (class ID #273)
+//	Threshold data are partitioned into two MEs for historical reasons. An instance of this ME,
+//	together with an optional instance of the threshold data 2 ME, contains threshold values for
+//	counters in PM history data MEs.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be related to multiple instances of PM history data type MEs.
+//
+//		Paired instances of threshold data 1 ME and threshold data 2 ME are implicitly linked together
+//		through a common ME ID.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Threshold Value_1
+//			Threshold value 1: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_2
+//			Threshold value 2: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_3
+//			Threshold value 3: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_4
+//			Threshold value 4: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_5
+//			Threshold value 5: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_6
+//			Threshold value 6: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_7
+//			Threshold value 7: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+type ThresholdData1 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	thresholddata1BME = &ManagedEntityDefinition{
+		Name:    "ThresholdData1",
+		ClassID: 273,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint32Field("ThresholdValue1", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint32Field("ThresholdValue2", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("ThresholdValue3", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint32Field("ThresholdValue4", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: Uint32Field("ThresholdValue5", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: Uint32Field("ThresholdValue6", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: Uint32Field("ThresholdValue7", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewThresholdData1 (class ID 273) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewThresholdData1(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(thresholddata1BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/thresholddata2.go b/vendor/github.com/cboling/omci/generated/thresholddata2.go
new file mode 100644
index 0000000..2b7b0a3
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/thresholddata2.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const ThresholdData2ClassId ClassID = ClassID(274)
+
+var thresholddata2BME *ManagedEntityDefinition
+
+// ThresholdData2 (class ID #274)
+//	Together with an instance of the threshold data 1 ME, an instance of this ME contains threshold
+//	values for counters maintained in one or more instances of PM history data MEs.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Instances of this ME are created and deleted by the OLT.
+//
+//	Relationships
+//		Refer to the relationships of the threshold data 1 ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Its value is the
+//			same as that of the paired threshold data 1 instance. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Threshold Value_8
+//			Threshold value 8: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_9
+//			Threshold value 9: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_10
+//			Threshold value 10: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_11
+//			Threshold value 11: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_12
+//			Threshold value 12: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_13
+//			Threshold value 13: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Threshold Value_14
+//			Threshold value 14: (R, W, setbycreate) (mandatory) (4 bytes)
+//
+type ThresholdData2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	thresholddata2BME = &ManagedEntityDefinition{
+		Name:    "ThresholdData2",
+		ClassID: 274,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint32Field("ThresholdValue8", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint32Field("ThresholdValue9", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("ThresholdValue10", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint32Field("ThresholdValue11", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: Uint32Field("ThresholdValue12", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: Uint32Field("ThresholdValue13", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: Uint32Field("ThresholdValue14", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+		},
+	}
+}
+
+// NewThresholdData2 (class ID 274) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewThresholdData2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(thresholddata2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/trafficdescriptor.go b/vendor/github.com/cboling/omci/generated/trafficdescriptor.go
new file mode 100644
index 0000000..80598bb
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/trafficdescriptor.go
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TrafficDescriptorClassId ClassID = ClassID(280)
+
+var trafficdescriptorBME *ManagedEntityDefinition
+
+// TrafficDescriptor (class ID #280)
+//	The traffic descriptor is a profile that allows for traffic management. A priority controlled
+//	ONU can point from a MAC bridge port configuration data ME to a traffic descriptor in order to
+//	implement traffic management (marking, policing). A rate controlled ONU can point to a traffic
+//	descriptor from either a MAC bridge port configuration data ME or a GEM port network CTP to
+//	implement traffic management (marking, shaping).
+//
+//	Packets are determined to be green, yellow or red as a function of the ingress packet rate and
+//	the settings in this ME. The colour indicates drop precedence (eligibility), subsequently used
+//	by the priority queue ME to drop packets conditionally during congestion conditions. Packet
+//	colour is also used by the optional mode 1 DBA status reporting function described in [ITU-T
+//	G.984.3]. Red packets are dropped immediately. Yellow packets are marked as drop eligible, and
+//	green packets are marked as not drop eligible, according to the egress colour marking attribute.
+//
+//	The algorithm used to determine the colour marking is specified by the meter type attribute. If
+//	[b-IETF RFC 4115] is used, then:
+//
+//	CIR4115 = CIR
+//
+//	EIR4115 = PIR – CIR (EIR: excess information rate)
+//
+//	CBS4115 = CBS
+//
+//	EBS4115 = PBS – CBS.
+//
+//	Relationships
+//		This ME is associated with a GEM port network CTP or a MAC bridge port configuration data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Cir
+//			CIR:	This attribute specifies the committed information rate, in bytes per second. The default
+//			is 0. (R, W, setbycreate) (optional) (4 bytes)
+//
+//		Pir
+//			PIR:	This attribute specifies the peak information rate, in bytes per second. The default value
+//			0 accepts the ONU's factory policy. (R, W, setbycreate) (optional) (4 bytes)
+//
+//		Cbs
+//			CBS:	This attribute specifies the committed burst size, in bytes. The default is 0. (R, W,
+//			setbycreate) (optional) (4 bytes)
+//
+//		Pbs
+//			PBS:	This attribute specifies the peak burst size, in bytes. The default value 0 accepts the
+//			ONU's factory policy. (R, W, setbycreate) (optional) (4 bytes)
+//
+//		Colour Mode
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Ingress Colour Marking
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Egress Colour Marking
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		Meter Type
+//			(R, setbycreate) (optional) (1 byte)
+//
+type TrafficDescriptor struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	trafficdescriptorBME = &ManagedEntityDefinition{
+		Name:    "TrafficDescriptor",
+		ClassID: 280,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint32Field("Cir", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 1),
+			2: Uint32Field("Pir", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+			3: Uint32Field("Cbs", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4: Uint32Field("Pbs", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5: ByteField("ColourMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6: ByteField("IngressColourMarking", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 6),
+			7: ByteField("EgressColourMarking", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8: ByteField("MeterType", 0, mapset.NewSetWith(Read, SetByCreate), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewTrafficDescriptor (class ID 280) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTrafficDescriptor(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(trafficdescriptorBME, params...)
+}
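
The colour-marking discussion above maps the traffic descriptor attributes onto the [b-IETF RFC 4115] marker parameters (CIR stays CIR, EIR = PIR - CIR, CBS stays CBS, EBS = PBS - CBS). A small Go sketch of just that mapping, assuming only the formulas quoted in the comment and, in particular, that PIR >= CIR and PBS >= CBS:

    package main

    import "fmt"

    // rfc4115Params derives the RFC 4115 marker parameters from the traffic
    // descriptor attributes, per the mapping quoted in the ME description.
    // It assumes PIR >= CIR and PBS >= CBS.
    func rfc4115Params(cir, pir, cbs, pbs uint32) (cir4115, eir4115, cbs4115, ebs4115 uint32) {
        return cir, pir - cir, cbs, pbs - cbs
    }

    func main() {
        cir, eir, cbs, ebs := rfc4115Params(1000000, 2500000, 16000, 48000)
        fmt.Println(cir, eir, cbs, ebs) // 1000000 1500000 16000 32000
    }
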
diff --git a/vendor/github.com/cboling/omci/generated/trafficscheduler.go b/vendor/github.com/cboling/omci/generated/trafficscheduler.go
new file mode 100644
index 0000000..69a6cb3
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/trafficscheduler.go
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TrafficSchedulerClassId ClassID = ClassID(278)
+
+var trafficschedulerBME *ManagedEntityDefinition
+
+// TrafficScheduler (class ID #278)
+//	NOTE 1 – In [ITU-T G.984.4], this ME is called a traffic scheduler-G.
+//
+//	An instance of this ME represents a logical object that can control upstream GEM packets. A
+//	traffic scheduler can accommodate GEM packets after a priority queue or other traffic scheduler
+//	and transfer them towards the next traffic scheduler or T-CONT. Because T-CONTs and traffic
+//	schedulers are created autonomously by the ONU, the ONU vendor predetermines the most complex
+//	traffic handling model it is prepared to support; the OLT may use less than the ONU's full
+//	capabilities, but cannot ask for more. See Appendix II for more details.
+//
+//	After the ONU creates instances of the T-CONT ME, it then autonomously creates instances of the
+//	traffic scheduler ME.
+//
+//	Relationships
+//		The traffic scheduler ME may be related to a T-CONT or other traffic schedulers through pointer
+//		attributes.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number indicates the physical capability that realizes the traffic scheduler. The first byte is
+//			the slot ID of the circuit pack with which this traffic scheduler is associated. For a traffic
+//			scheduler that is not associated with a circuit pack, the first byte is 0xFF. The second byte is
+//			the traffic scheduler id, assigned by the ONU itself. Traffic schedulers are numbered in
+//			ascending order with the range 0..0xFF in each circuit pack or in the ONU core. (R) (mandatory)
+//			(2 bytes)
+//
+//		T_Cont Pointer
+//			NOTE 2 – This attribute is read-only unless otherwise specified by the QoS configuration
+//			flexibility attribute of the ONU2-G ME. If flexible configuration is not supported, the ONU
+//			should reject an attempt to set the TCONT pointer attribute with a parameter error result-reason
+//			code.
+//
+//		Traffic Scheduler Pointer
+//			Traffic scheduler pointer: This attribute points to another traffic scheduler ME instance that
+//			may serve this traffic scheduler. This pointer is used when this traffic scheduler is connected
+//			to another traffic scheduler; it is null (0) otherwise. (R) (mandatory) (2 bytes)
+//
+//		Policy
+//			NOTE 3 – This attribute is read-only unless otherwise specified by the QoS configuration
+//			flexibility attribute of the ONU2-G ME. If flexible configuration is not supported, the ONU
+//			should reject an attempt to set the policy attribute with a parameter error result-reason code.
+//
+//		Priority_Weight
+//			Upon ME instantiation, the ONU sets this attribute to 0. (R, W) (mandatory) (1 byte)
+//
+type TrafficScheduler struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	trafficschedulerBME = &ManagedEntityDefinition{
+		Name:    "TrafficScheduler",
+		ClassID: 278,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("TContPointer", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: Uint16Field("TrafficSchedulerPointer", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: ByteField("Policy", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: ByteField("PriorityWeight", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+		},
+	}
+}
+
+// NewTrafficScheduler (class ID 278) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTrafficScheduler(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(trafficschedulerBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelmanagedentity.go b/vendor/github.com/cboling/omci/generated/twdmchannelmanagedentity.go
new file mode 100644
index 0000000..f8c2064
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelmanagedentity.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelManagedEntityClassId ClassID = ClassID(443)
+
+var twdmchannelmanagedentityBME *ManagedEntityDefinition
+
+// TwdmChannelManagedEntity (class ID #443)
+//	This ME provides an anchor for the MEs involved in collection of PM statistics per TWDM channel,
+//	as stipulated by clause 14 of [ITU-T G.989.3]. Instances of this ME are instantiated
+//	autonomously by the ONU.
+//
+//	Relationships
+//		One or more instances of this ME are implicitly associated with the TWDM System profile ME. The
+//		number of instances created is announced by the total TWDM channel number attribute of the TWDM
+//		system profile ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. This 2 byte
+//			number is represented as 0xSSBB, where SS indicates the ONU slot ID, and BB is the TWDM channel
+//			ME number assigned by the ONU itself, starting from 0 in ascending order. (R) (mandatory)
+//			(2 bytes)
+//
+//		Active Channel Indication
+//			Active channel indication: The default value is false. The ONU sets the attribute to true when
+//			it receives the Channel_Profile PLOAM messages for that channel. The ONU clears the attribute
+//			when it receives the Channel_Profile PLOAM message marked “void” for that channel. (R)
+//			(mandatory) (1 byte)
+//
+//		Operational Channel Indication
+//			Operational channel indication: A Boolean attribute that is set to true for an active TWDM
+//			channel in which the ONT is currently operating. The operational statistic is accumulated in the
+//			PM history data MEs associated with that TWDM channel. (R) (mandatory) (1 byte)
+//
+//		Downstream Wavelength Channel
+//			Downstream wavelength channel: For an active TWDM channel, this attribute identifies the
+//			downstream wavelength channel in reference to Table 11-2 of [ITU-T G.989.2]. For an inactive
+//			channel it has value 0xFF. (R) (mandatory) (1 byte)
+//
+//		Upstream Wavelength Channel
+//			Upstream wavelength channel: For an active TWDM channel, this attribute identifies the upstream
+//			wavelength channel in reference to Table VIII-5 of [ITU-T G.989.2]. For an inactive channel its
+//			value is 0xFF. (R) (mandatory) (1 byte)
+//
+type TwdmChannelManagedEntity struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelmanagedentityBME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelManagedEntity",
+		ClassID: 443,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("ActiveChannelIndication", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("OperationalChannelIndication", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: ByteField("DownstreamWavelengthChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: ByteField("UpstreamWavelengthChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+		},
+	}
+}
+
+// NewTwdmChannelManagedEntity (class ID 443) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelManagedEntity(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelmanagedentityBME, params...)
+}
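
The TWDM channel ME identifier described above packs the ONU slot ID and the ONU-assigned channel number into one 16-bit value (0xSSBB). A tiny decoding sketch under that stated layout (hypothetical helper, not part of the generated package):

    package main

    import "fmt"

    // splitTwdmChannelMeId unpacks a TWDM channel ME ID of the form 0xSSBB into
    // the ONU slot ID (SS) and the ONU-assigned channel ME number (BB).
    func splitTwdmChannelMeId(id uint16) (slot, channel uint8) {
        return uint8(id >> 8), uint8(id & 0x00FF)
    }

    func main() {
        slot, channel := splitTwdmChannelMeId(0x0102)
        fmt.Printf("slot=%d channel=%d\n", slot, channel) // slot=1 channel=2
    }
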
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelomciperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/twdmchannelomciperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..2be87e4
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelomciperformancemonitoringhistorydata.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelOmciPerformanceMonitoringHistoryDataClassId ClassID = ClassID(452)
+
+var twdmchannelomciperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// TwdmChannelOmciPerformanceMonitoringHistoryData (class ID #452)
+//	This ME collects OMCI-related PM data associated with the slot/circuit pack, hosting one or more
+//	ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and deleted by the OLT.
+//
+//	The counters maintained by this ME are characterized as optional in Clause 14 of
+//	[ITU-T G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Omci Baseline Message Count
+//			OMCI baseline message count: The counter of baseline format OMCI messages directed to the given
+//			ONU. (R) (mandatory) (4 byte)
+//
+//		Omci Extended Message Count
+//			OMCI extended message count: The counter of extended format OMCI messages directed to the given
+//			ONU. (R) (mandatory) (4 byte)
+//
+//		Omci Mic Error Count
+//			OMCI MIC error count: The counter of OMCI messages received with MIC errors. (R) (mandatory)
+//			(4 byte)
+//
+type TwdmChannelOmciPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelomciperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelOmciPerformanceMonitoringHistoryData",
+		ClassID: 452,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("OmciBaselineMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("OmciExtendedMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("OmciMicErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewTwdmChannelOmciPerformanceMonitoringHistoryData (class ID 452) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelOmciPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelomciperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelphylodsperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/twdmchannelphylodsperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..a4f0e84
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelphylodsperformancemonitoringhistorydata.go
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelPhyLodsPerformanceMonitoringHistoryDataClassId ClassID = ClassID(444)
+
+var twdmchannelphylodsperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// TwdmChannelPhyLodsPerformanceMonitoringHistoryData (class ID #444)
+//	This ME collects certain PM data associated with the slot/circuit pack, hosting one or more
+//	ANI-G MEs, and a specific TWDM channel. Instances of this ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Total Received Words Protected By Bit_Interleaved Parity _32 Bip_32
+//			Total received words protected by bit-interleaved parity-32 (BIP-32): The count of 4 byte words
+//			included in BIP-32 check. This is a product of the number of downstream FS frames received by
+//			the size of the downstream FS frame after the FEC parity bytes, if any, have been removed. The
+//			count applies to the entire downstream data flow, whether or not addressed to that ONT. (R)
+//			(mandatory) (8 bytes)
+//
+//		Bip_32 Bit Error Count
+//			BIP-32 bit error count: Count of the bit errors in the received downstream FS frames as measured
+//			using BIP-32. If FEC is supported in the downstream direction, the BIP-32 count applies to the
+//			downstream FS frame after the FEC correction has been applied and the FEC parity bytes have been
+//			removed. (R) (mandatory) (4 bytes)
+//
+//		Corrected Psbd Hec Error Count
+//			Corrected PSBd HEC error count: The count of the errors in either CFC or OCS fields of the PSBd
+//			block that have been corrected using the HEC technique. (R) (mandatory) (4 bytes)
+//
+//		Uncorrectable Psbd Hec Error Count
+//			Uncorrectable PSBd HEC error count: The count of the errors in either CFC or OCS fields of the
+//			PSBd block that could not be corrected using the HEC technique. (R) (mandatory) (4 bytes)
+//
+//		Corrected Downstream Fs Header Hec Error Count
+//			Corrected downstream FS header HEC error count: The count of the errors in the downstream FS
+//			header that have been corrected using the HEC technique. (R) (mandatory) (4 bytes)
+//
+//		Uncorrectable Downstream Fs Header Hec Error Count
+//			Uncorrectable downstream FS header HEC error count: The count of the errors in the downstream FS
+//			header that could not be corrected using the HEC technique. (R) (mandatory) (4 bytes)
+//
+//		Total Number Of Lods Events
+//			Total number of LODS events: The count of the state transitions from O5.1/O5.2 to O6, referring
+//			to the ONU activation cycle state machine, clause 12 of [ITU-T G.989.3]. (R) (mandatory)
+//			(4 bytes)
+//
+//		Lods Events Restored In Operating Twdm Channel
+//			LODS events restored in operating TWDM channel: The count of LODS events cleared automatically
+//			without retuning. (R) (mandatory) (4 bytes)
+//
+//		Lods Events Restored In Protection Twdm Channel
+//			LODS events restored in protection TWDM channel: The count of LODS events resolved by retuning
+//			to a pre-configured protection TWDM channel. The event is counted against the original operating
+//			channel. (R) (mandatory) (4 bytes)
+//
+//		Lods Events Restored In Discretionary Twdm Channel
+//			LODS events restored in discretionary TWDM channel: The count of LODS events resolved by
+//			retuning to a TWDM channel chosen by the ONU, without reactivation. Implies that the wavelength
+//			channel protection for the operating channel is not active. The event is counted against the
+//			original operating channel (R) (mandatory) (4 bytes)
+//
+//		Lods Events Resulting In Reactivation
+//			LODS events resulting in reactivation: The count of LODS events resolved through ONU
+//			reactivation; that is, either TO2 (without WLCP) or TO3 + TO4 (with WLCP) expires before the
+//			downstream channel is reacquired, referring to the ONU activation cycle state machine, clause 12
+//			of [ITU-T G.989.3]. The event is counted against the original operating channel (R) (mandatory)
+//			(4 bytes)
+//
+//		Lods Events Resulting In Reactivation After Retuning To Protection Twdm Channel
+//			LODS events resulting in reactivation after retuning to protection TWDM channel: The count of
+//			LODS events resolved through ONU reactivation after attempted protection switching, which turns
+//			unsuccessful due to a handshake failure. (R) (mandatory) (4 bytes)
+//
+//		Lods Events Resulting In Reactivation After Retuning To Discretionary Twdm Channel
+//			LODS events resulting in reactivation after retuning to discretionary TWDM channel: The count of
+//			LODS events resolved through ONU reactivation after attempted retuning to a discretionary
+//			channel, which turns unsuccessful due to a handshake failure. (R) (mandatory) (4 bytes)
+//
+type TwdmChannelPhyLodsPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelphylodsperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelPhyLodsPerformanceMonitoringHistoryData",
+		ClassID: 444,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint64Field("TotalReceivedWordsProtectedByBitInterleavedParity32Bip32", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("Bip32BitErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("CorrectedPsbdHecErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("UncorrectablePsbdHecErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("CorrectedDownstreamFsHeaderHecErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("UncorrectableDownstreamFsHeaderHecErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("TotalNumberOfLodsEvents", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("LodsEventsRestoredInOperatingTwdmChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("LodsEventsRestoredInProtectionTwdmChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("LodsEventsRestoredInDiscretionaryTwdmChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("LodsEventsResultingInReactivation", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("LodsEventsResultingInReactivationAfterRetuningToProtectionTwdmChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("LodsEventsResultingInReactivationAfterRetuningToDiscretionaryTwdmChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+		},
+	}
+}
+
+// NewTwdmChannelPhyLodsPerformanceMonitoringHistoryData (class ID 444) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelPhyLodsPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelphylodsperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart1.go b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart1.go
new file mode 100644
index 0000000..17acd46
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart1.go
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelPloamPerformanceMonitoringHistoryDataPart1ClassId ClassID = ClassID(446)
+
+var twdmchannelploamperformancemonitoringhistorydatapart1BME *ManagedEntityDefinition
+
+// TwdmChannelPloamPerformanceMonitoringHistoryDataPart1 (class ID #446)
+//	This ME collects certain PLOAM-related PM data associated with the slot/circuit pack, hosting
+//	one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and deleted
+//	by the OLT.
+//
+//	The downstream PLOAM message counts of this ME include only the received PLOAM messages
+//	pertaining to the given ONU, i.e.:
+//
+//	–	unicast PLOAM messages, addressed by ONU-ID;
+//
+//	–	broadcast PLOAM messages, addressed by serial number;
+//
+//	–	broadcast PLOAM messages, addressed to all ONUs on the PON.
+//
+//	This ME includes all PLOAM PM counters characterized as mandatory in clause 14 of
+//	[ITU-T G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Ploam Mic Errors
+//			PLOAM MIC errors: The counter of received PLOAM messages that remain unparsable due to MIC
+//			error. (R) (mandatory) (4 byte)
+//
+//		Downstream Ploam Message Count
+//			Downstream PLOAM message count: The counter of received broadcast and unicast PLOAM messages
+//			pertaining to the given ONU. (R) (mandatory) (4 byte)
+//
+//		Ranging_Time Message Count
+//			Ranging_Time message count: The counter of received Ranging_Time PLOAM messages. (R) (mandatory)
+//			(4 byte)
+//
+//		Protection_Control Message Count
+//			Protection_Control message count: The counter of received Protection_Control PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Adjust_Tx_Wavelength Message Count
+//			Adjust_Tx_Wavelength message count: The counter of received Adjust_Tx_Wavelength PLOAM messages.
+//			(R) (mandatory) (4 byte)
+//
+//		Adjust_Tx_Wavelength Adjustment Amplitude
+//			Adjust_Tx_Wavelength adjustment amplitude: An estimator of the absolute value of the
+//			transmission wavelength adjustment. (R) (mandatory) (4 byte)
+//
+type TwdmChannelPloamPerformanceMonitoringHistoryDataPart1 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelploamperformancemonitoringhistorydatapart1BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelPloamPerformanceMonitoringHistoryDataPart1",
+		ClassID: 446,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("PloamMicErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("DownstreamPloamMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("RangingTimeMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("ProtectionControlMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint32Field("AdjustTxWavelengthMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint32Field("AdjustTxWavelengthAdjustmentAmplitude", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart1 (class ID 446) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart1(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelploamperformancemonitoringhistorydatapart1BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart2.go b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart2.go
new file mode 100644
index 0000000..da563d9
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart2.go
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelPloamPerformanceMonitoringHistoryDataPart2ClassId ClassID = ClassID(447)
+
+var twdmchannelploamperformancemonitoringhistorydatapart2BME *ManagedEntityDefinition
+
+// TwdmChannelPloamPerformanceMonitoringHistoryDataPart2 (class ID #447)
+//	This ME collects additional PLOAM-related PM data associated with the slot/circuit pack, hosting
+//	one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and deleted
+//	by the OLT.
+//
+//	The downstream PLOAM message counts of this ME include only the received PLOAM messages
+//	pertaining to the given ONU, i.e.:
+//
+//	–	unicast PLOAM messages, addressed by ONU-ID;
+//
+//	–	broadcast PLOAM messages, addressed by serial number;
+//
+//	–	broadcast PLOAM messages, addressed to all ONUs on the PON.
+//
+//	All these counters are characterized as optional in clause 14 of [ITU-T  G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		System_Profile Message Count
+//			System_Profile message count: The counter of received System_Profile PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Channel_Profile Message Count
+//			Channel_Profile message count: The counter of received Channel_Profile PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Burst_Profile Message Count
+//			Burst_Profile message count: The counter of received Burst_Profile PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Assign_Onu_Id Message Count
+//			Assign_ONU-ID message count: The counter of received Assign_ONU-ID PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Unsatisfied Adjust_Tx_Wavelength Requests
+//			Unsatisfied Adjust_Tx_Wavelength requests: The counter of Adjust_Tx_Wavelength requests not
+//			applied or partially applied due to target US wavelength being out of Tx tuning range.  (R)
+//			(mandatory) (4 byte)
+//
+//		Deactivate_Onu_Id Message Count
+//			Deactivate_ONU-ID message count: The counter of received Deactivate_ONU-ID PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Disable_Serial_Number Message Count
+//			Disable_Serial_Number message count: The counter of received Disable_Serial_Number PLOAM
+//			messages. (R) (mandatory) (4 byte)
+//
+//		Request_Registration Message Count
+//			Request_Registration message count: The counter of received Request_Registration PLOAM messages.
+//			(R) (mandatory) (4 byte)
+//
+//		Assign_Alloc_Id Message Count
+//			Assign_Alloc-ID message count: The counter of received Assign_Alloc-ID PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Key_Control Message Count
+//			Key_Control message count: The counter of received Key_Control PLOAM messages. (R) (mandatory)
+//			(4 byte)
+//
+//		Sleep_Allow Message Count
+//			Sleep_Allow message count: The counter of received Sleep_Allow PLOAM messages. (R) (mandatory)
+//			(4 byte)
+//
+//		Tuning_Control_Request Message Count
+//			Tuning_Control/Request message count: The counter of received Tuning_Control PLOAM messages with
+//			Request operation code. (R) (mandatory) (4 byte)
+//
+//		Tuning_Control_Complete_D Message Count
+//			Tuning_Control/Complete_d message count: The counter of received Tuning_Control PLOAM messages
+//			with Complete_d operation code. (R) (mandatory) (4 byte)
+//
+//		Calibration_Request Message Count
+//			Calibration_Request message count: The counter of received Calibration_Request PLOAM messages.
+//			(R) (mandatory) (4 byte)
+//
+type TwdmChannelPloamPerformanceMonitoringHistoryDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelploamperformancemonitoringhistorydatapart2BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelPloamPerformanceMonitoringHistoryDataPart2",
+		ClassID: 447,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("SystemProfileMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("ChannelProfileMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("BurstProfileMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("AssignOnuIdMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("UnsatisfiedAdjustTxWavelengthRequests", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("DeactivateOnuIdMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("DisableSerialNumberMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("RequestRegistrationMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("AssignAllocIdMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("KeyControlMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("SleepAllowMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("TuningControlRequestMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("TuningControlCompleteDMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint32Field("CalibrationRequestMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart2 (class ID 447) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelploamperformancemonitoringhistorydatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart3.go b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart3.go
new file mode 100644
index 0000000..aa5ef38
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelploamperformancemonitoringhistorydatapart3.go
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelPloamPerformanceMonitoringHistoryDataPart3ClassId ClassID = ClassID(448)
+
+var twdmchannelploamperformancemonitoringhistorydatapart3BME *ManagedEntityDefinition
+
+// TwdmChannelPloamPerformanceMonitoringHistoryDataPart3 (class ID #448)
+//	This ME collects remaining PLOAM-related PM data associated with the slot/circuit pack, hosting
+//	one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and deleted
+//	by the OLT.
+//
+//	This ME contains the counters related to transmitted upstream PLOAM messages. All these counters
+//	are characterized as optional in clause 14 of [ITU-T  G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Upstream Ploam Message Count
+//			Upstream PLOAM message count: The aggregate counter of PLOAM messages, other than AK PLOAM MT,
+//			transmitted by the given ONU. (R) (mandatory) (4 byte)
+//
+//		Serial_Number_Onu In_Band Message Count
+//			Serial_Number_ONU (in-band) message count: The counter of transmitted in-band Serial_Number_ONU
+//			PLOAM messages. (R) (mandatory) (4 byte)
+//
+//		Serial_Number_Onu Amcc Message Count
+//			Serial_Number_ONU (AMCC) message count: The counter of transmitted auxiliary management and
+//			control channel (AMCC) Serial_Number_ONU PLOAM messages. (R) (mandatory) (4 byte)
+//
+//		Registration Message Count
+//			Registration message count: The counter of transmitted Registration PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Key_Report Message Count
+//			Key_Report message count: The counter of transmitted Key_Report PLOAM messages. (R) (mandatory)
+//			(4 byte)
+//
+//		Acknowledgement Message Count
+//			Acknowledgement message count: The counter of transmitted Acknowledgement PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Sleep_Request Message Count
+//			Sleep_Request message count: The counter of transmitted Sleep_Request PLOAM messages. (R)
+//			(mandatory) (4 byte)
+//
+//		Tuning_Response Ack_Nack Message Count
+//			Tuning_Response (ACK/NACK) message count: The counter of transmitted Tuning_Response PLOAM
+//			messages with ACK/NACK operation code. (R) (mandatory) (4 byte)
+//
+//		Tuning_Response Complete_U_Rollback Message Count
+//			Tuning_Response (Complete_u/Rollback) message count: The counter of transmitted Tuning_Response
+//			PLOAM messages with Complete_u/Rollback operation code. (R) (mandatory) (4 byte)
+//
+//		Power_Consumption_Report Message Count
+//			Power_Consumption_Report message count: The counter of transmitted Power_Consumption_Report
+//			PLOAM messages. (R) (mandatory) (4 byte)
+//
+//		Change_Power_Level Parameter Error Count
+//			Change_Power_Level parameter error count: The counter of transmitted Acknowledgement PLOAM
+//			messages with Parameter Error completion code in response to Change_Power_Level PLOAM message.
+//			(R) (mandatory) (4 byte)
+//
+type TwdmChannelPloamPerformanceMonitoringHistoryDataPart3 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelploamperformancemonitoringhistorydatapart3BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelPloamPerformanceMonitoringHistoryDataPart3",
+		ClassID: 448,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("UpstreamPloamMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("SerialNumberOnuInBandMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("SerialNumberOnuAmccMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("RegistrationMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("KeyReportMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("AcknowledgementMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("SleepRequestMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TuningResponseAckNackMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("TuningResponseCompleteURollbackMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("PowerConsumptionReportMessageCount", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("ChangePowerLevelParameterErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+		},
+	}
+}
+
+// NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart3 (class ID 448) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewTwdmChannelPloamPerformanceMonitoringHistoryDataPart3(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelploamperformancemonitoringhistorydatapart3BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart1.go b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart1.go
new file mode 100644
index 0000000..8e44310
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart1.go
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelTuningPerformanceMonitoringHistoryDataPart1ClassId ClassID = ClassID(449)
+
+var twdmchanneltuningperformancemonitoringhistorydatapart1BME *ManagedEntityDefinition
+
+// TwdmChannelTuningPerformanceMonitoringHistoryDataPart1 (class ID #449)
+//	This ME collects certain tuning-control-related PM data associated with the slot/circuit pack,
+//	hosting one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	The relevant events this ME is concerned with are counted towards the PM statistics associated
+//	with the source TWDM channel. The attribute descriptions refer to the ONU activation cycle
+//	states and timers specified in clause 12 of [ITU-T  G.989.3]. This ME contains the counters
+//	characterized as mandatory in clause 14 of [ITU-T  G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tuning Control Requests For Rx Only Or Rx And Tx
+//			Tuning control requests for Rx only or Rx and Tx: The counter of received Tuning_Control PLOAM
+//			messages with Request operation code that contain tuning instructions either for receiver only
+//			or for both receiver and transmitter. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests For Tx Only
+//			Tuning control requests for Tx only: The counter of received Tuning_Control PLOAM messages with
+//			Request operation code that contain tuning instructions for transmitter only. (R) (mandatory)
+//			(4 byte)
+//
+//		Tuning Control Requests Rejected_Int_Sfc
+//			Tuning control requests rejected/INT_SFC: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and INT_SFC response code, indicating inability to start
+//			transceiver tuning by the specified time (SFC). (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Xxx
+//			Tuning control requests rejected/DS_xxx: The aggregate counter of transmitted Tuning_Response
+//			PLOAM messages with NACK operation code and any DS_xxx response code, indicating target
+//			downstream wavelength channel inconsistency. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Xxx
+//			Tuning control requests rejected/US_xxx: The aggregate counter of transmitted Tuning_Response
+//			PLOAM messages with NACK operation code and any US_xxx response code, indicating target upstream
+//			wavelength channel inconsistency. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Fulfilled With Onu Reacquired At Target Channel
+//			Tuning control requests fulfilled with ONU reacquired at target channel: The counter of
+//			controlled tuning attempts for which an upstream tuning confirmation has been obtained in the
+//			target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Failed Due To Target Ds Wavelength Channel Not Found
+//			Tuning control requests failed due to target DS wavelength channel not found: The counter of
+//			controlled tuning attempts that failed due to timer TO4 expiration in the DS Tuning state (O8)
+//			in the target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Failed Due To No Feedback In Target Ds Wavelength Channel
+//			Tuning control requests failed due to no feedback in target DS wavelength channel: The counter
+//			of controlled tuning attempts that failed due to timer TO5 expiration in the US Tuning state
+//			(O9) in the target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Resolved With Onu Reacquired At Discretionary Channel
+//			Tuning control requests resolved with ONU reacquired at discretionary channel: The counter of
+//			controlled tuning attempts for which an upstream tuning confirmation has been obtained in the
+//			discretionary channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Com_Ds
+//			Tuning control requests Rollback/COM_DS: The counter of controlled tuning attempts that failed
+//			due to a communication condition in the target channel, as indicated by the Tuning_Response PLOAM
+//			message with Rollback operation code and COM_DS response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Ds_Xxx
+//			Tuning control requests Rollback/DS_xxx: The aggregate counter of controlled tuning attempts
+//			that failed due to target downstream wavelength channel inconsistency, as indicated by the
+//			Tuning_Response PLOAM message with Rollback operation code and any DS_xxx response code. (R)
+//			(mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Xxx
+//			Tuning control requests Rollback/US_xxx: The aggregate counter of controlled tuning attempts
+//			that failed due to target upstream wavelength channel parameter violation, as indicated by the
+//			Tuning_Response PLOAM message with Rollback operation code and US_xxx response code. (R)
+//			(mandatory) (4 byte)
+//
+//		Tuning Control Requests Failed With Onu Reactivation
+//			Tuning control requests failed with ONU reactivation: The counter of controlled tuning attempts
+//			that failed for any reason, with expiration of timers TO4 or TO5 causing the ONU to transition into
+//			state O1. (R) (mandatory) (4 byte)
+//
+type TwdmChannelTuningPerformanceMonitoringHistoryDataPart1 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchanneltuningperformancemonitoringhistorydatapart1BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelTuningPerformanceMonitoringHistoryDataPart1",
+		ClassID: 449,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("TuningControlRequestsForRxOnlyOrRxAndTx", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TuningControlRequestsForTxOnly", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("TuningControlRequestsRejectedIntSfc", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("TuningControlRequestsRejectedDsXxx", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("TuningControlRequestsRejectedUsXxx", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TuningControlRequestsFulfilledWithOnuReacquiredAtTargetChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("TuningControlRequestsFailedDueToTargetDsWavelengthChannelNotFound", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TuningControlRequestsFailedDueToNoFeedbackInTargetDsWavelengthChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("TuningControlRequestsResolvedWithOnuReacquiredAtDiscretionaryChannel", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TuningControlRequestsRollbackComDs", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("TuningControlRequestsRollbackDsXxx", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("TuningControlRequestsRollbackUsXxx", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("TuningControlRequestsFailedWithOnuReactivation", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+		},
+	}
+}
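+
+// The AllowedAttributeMask above follows the usual OMCI attribute-mask layout, in
+// which the most significant bit of the 16-bit mask denotes attribute 1 and the
+// least significant bit denotes attribute 16; with attributes 1..15 defined here,
+// that yields 0xFFFE. A small illustrative sketch of deriving such a mask, assuming
+// that convention (the helper name is not part of this package):
+//
+//	// attributeMask returns a 16-bit mask with attributes 1..n set, counting
+//	// attribute 1 from the most significant bit.
+//	func attributeMask(n uint) uint16 {
+//		var mask uint16
+//		for i := uint(1); i <= n && i <= 16; i++ {
+//			mask |= 1 << (16 - i)
+//		}
+//		return mask // attributeMask(15) == 0xFFFE, attributeMask(10) == 0xFFC0
+//	}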
+
+// NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart1 (class ID 449) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart1(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchanneltuningperformancemonitoringhistorydatapart1BME, params...)
+}
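+
+// A minimal usage sketch for the constructor above (and its siblings throughout this
+// package). It assumes that ParamData carries the instance identifier in an EntityID
+// field and that the returned OmciErrors value exposes GetError() for checking the
+// outcome; both are assumptions about this library's API rather than guarantees, and
+// the entity ID value is purely illustrative.
+//
+//	params := ParamData{EntityID: 0x0101}
+//	me, omciErr := NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart1(params)
+//	if omciErr.GetError() != nil {
+//		// the supplied parameters failed validation against the ME definition
+//	}
+//	_ = me // the validated ManagedEntity is ready for encode/decode use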
diff --git a/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart2.go b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart2.go
new file mode 100644
index 0000000..479ba9c
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart2.go
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelTuningPerformanceMonitoringHistoryDataPart2ClassId ClassID = ClassID(450)
+
+var twdmchanneltuningperformancemonitoringhistorydatapart2BME *ManagedEntityDefinition
+
+// TwdmChannelTuningPerformanceMonitoringHistoryDataPart2 (class ID #450)
+//	This ME collects additional tuning-control-related PM data associated with the slot/circuit
+//	pack, hosting one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are
+//	created and deleted by the OLT.
+//
+//	The relevant events this ME is concerned with are counted towards the PM statistics associated
+//	with the source TWDM channel. This ME contains the counters characterized as optional in clause
+//	14 of [ITU-T  G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tuning Control Requests Rejected_Ds_Albl
+//			Tuning control requests rejected/DS_ALBL: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_ALBL response code, indicating downstream
+//			administrative label inconsistency. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Void
+//			Tuning control requests rejected/DS_VOID: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_VOID response code, indicating that the target
+//			downstream wavelength channel descriptor is void. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Part
+//			Tuning control requests rejected/DS_PART: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_PART response code, indicating that the tuning request
+//			involves a channel partition violation. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Tunr
+//			Tuning control requests rejected/DS_TUNR: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_TUNR response code, indicating that the target DS
+//			wavelength channel is out of receiver tuning range. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Lnrt
+//			Tuning control requests rejected/DS_LNRT: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_LNRT response code, indicating downstream line rate
+//			inconsistency in the target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Ds_Lncd
+//			Tuning control requests rejected/DS_LNCD: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and DS_LNCD response code, indicating downstream line code
+//			inconsistency in the target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Albl
+//			Tuning control requests rejected/US_ALBL: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_ALBL response code, indicating upstream administrative
+//			label inconsistency. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Void
+//			Tuning control requests rejected/US_VOID: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_VOID response code, indicating that the target upstream
+//			wavelength channel descriptor is void. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Tunr
+//			Tuning control requests rejected/US_TUNR: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_TUNR response code, indicating that the target US
+//			wavelength channel is out of transmitter tuning range. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Clbr
+//			Tuning control requests rejected/US_CLBR: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_CLBR response code, indicating that the transmitter has
+//			insufficient calibration accuracy in the target US wavelength channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Lktp
+//			Tuning control requests rejected/US_LKTP: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_LKTP response code, indicating upstream optical link
+//			type inconsistency. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Lnrt
+//			Tuning control requests rejected/US_LNRT: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_LNRT response code, indicating upstream line rate
+//			inconsistency in the target channel. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rejected_Us_Lncd
+//			Tuning control requests rejected/US_LNCD: The counter of transmitted Tuning_Response PLOAM
+//			messages with NACK operation code and US_LNCD response code, indicating upstream line code
+//			inconsistency in the target channel. (R) (mandatory) (4 byte)
+//
+type TwdmChannelTuningPerformanceMonitoringHistoryDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchanneltuningperformancemonitoringhistorydatapart2BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelTuningPerformanceMonitoringHistoryDataPart2",
+		ClassID: 450,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("TuningControlRequestsRejectedDsAlbl", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TuningControlRequestsRejectedDsVoid", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("TuningControlRequestsRejectedDsPart", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("TuningControlRequestsRejectedDsTunr", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("TuningControlRequestsRejectedDsLnrt", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TuningControlRequestsRejectedDsLncd", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("TuningControlRequestsRejectedUsAlbl", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TuningControlRequestsRejectedUsVoid", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("TuningControlRequestsRejectedUsTunr", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("TuningControlRequestsRejectedUsClbr", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint32Field("TuningControlRequestsRejectedUsLktp", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint32Field("TuningControlRequestsRejectedUsLnrt", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint32Field("TuningControlRequestsRejectedUsLncd", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+		},
+	}
+}
+
+// NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart2 (class ID 450) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchanneltuningperformancemonitoringhistorydatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart3.go b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart3.go
new file mode 100644
index 0000000..ac28bcc
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchanneltuningperformancemonitoringhistorydatapart3.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelTuningPerformanceMonitoringHistoryDataPart3ClassId ClassID = ClassID(451)
+
+var twdmchanneltuningperformancemonitoringhistorydatapart3BME *ManagedEntityDefinition
+
+// TwdmChannelTuningPerformanceMonitoringHistoryDataPart3 (class ID #451)
+//	This ME collects remaining tuning-control-related PM data associated with the slot/circuit pack,
+//	hosting one or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	The relevant events this ME is concerned with are counted towards the PM statistics associated
+//	with the source TWDM channel. This ME contains the counters characterized as optional in clause
+//	14 of [ITU-T G.989.3].
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Tuning Control Requests Rollback_Ds_Albl
+//			Tuning control requests Rollback/DS_ALBL: The counter of controlled tuning attempts that failed
+//			due to downstream administrative label inconsistency, as indicated by the Tuning_Response PLOAM
+//			message with Rollback operation code and DS_ALBL response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Ds_Lktp
+//			Tuning control requests Rollback/DS_LKTP: The counter of controlled tuning attempts that failed
+//			due to downstream optical link type inconsistency, as indicated by the Tuning_Response PLOAM
+//			message with Rollback operation code and DS_LKTP response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Albl
+//			Tuning control requests Rollback/US_ALBL: The counter of controlled tuning attempts that failed
+//			due to upstream administrative label violation, as indicated by the Tuning_Response PLOAM
+//			message with Rollback operation code and US_ALBL response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Void
+//			Tuning control requests Rollback/US_VOID: The counter of controlled tuning attempts that failed
+//			due to the target upstream wavelength channel descriptor being void, as indicated by the
+//			Tuning_Response PLOAM message with Rollback operation code and US_VOID response code. (R)
+//			(mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Tunr
+//			Tuning control requests Rollback/US_TUNR: The counter of controlled tuning attempts that failed
+//			due to the transmitter tuning range violation, as indicated by the Tuning_Response PLOAM message
+//			with Rollback operation code and US_TUNR response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Lktp
+//			Tuning control requests Rollback/US_LKTP: The counter of controlled tuning attempts that failed
+//			due to the upstream optical link type violation, as indicated by the Tuning_Response PLOAM
+//			message with Rollback operation code and US_LKTP response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Lnrt
+//			Tuning control requests Rollback/US_LNRT: The counter of controlled tuning attempts that failed
+//			due to the upstream line rate violation, as indicated by the Tuning_Response PLOAM message with
+//			Rollback operation code and US_LNRT response code. (R) (mandatory) (4 byte)
+//
+//		Tuning Control Requests Rollback_Us_Lncd
+//			Tuning control requests Rollback/US_LNCD: The counter of controlled tuning attempts that failed
+//			due to the upstream line code violation, as indicated by the Tuning_Response PLOAM message with
+//			Rollback operation code and US_LNCD response code. (R) (mandatory) (4 byte)
+//
+type TwdmChannelTuningPerformanceMonitoringHistoryDataPart3 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchanneltuningperformancemonitoringhistorydatapart3BME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelTuningPerformanceMonitoringHistoryDataPart3",
+		ClassID: 451,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("TuningControlRequestsRollbackDsAlbl", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint32Field("TuningControlRequestsRollbackDsLktp", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("TuningControlRequestsRollbackUsAlbl", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("TuningControlRequestsRollbackUsVoid", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("TuningControlRequestsRollbackUsTunr", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("TuningControlRequestsRollbackUsLktp", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("TuningControlRequestsRollbackUsLnrt", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("TuningControlRequestsRollbackUsLncd", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+		},
+	}
+}
+
+// NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart3 (class ID 451) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewTwdmChannelTuningPerformanceMonitoringHistoryDataPart3(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchanneltuningperformancemonitoringhistorydatapart3BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/twdmchannelxgemperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/twdmchannelxgemperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..b81fba8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/twdmchannelxgemperformancemonitoringhistorydata.go
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const TwdmChannelXgemPerformanceMonitoringHistoryDataClassId ClassID = ClassID(445)
+
+var twdmchannelxgemperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// TwdmChannelXgemPerformanceMonitoringHistoryData (class ID #445)
+//	This ME collects certain XGEM-related PM data associated with the slot/circuit pack, hosting one
+//	or more ANI-G MEs, for a specific TWDM channel. Instances of this ME are created and deleted by
+//	the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of TWDM channel ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the TWDM channel ME. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 64 B It Id
+//			Threshold data 64 bit ID: This attribute points to an instance of the threshold data 64 bit ME
+//			that contains PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Total Transmitted Xgem Frames
+//			Total transmitted XGEM frames: The counter aggregated across all XGEM ports of the given ONU.
+//			(R) (mandatory) (8 byte)
+//
+//		Transmitted Xgem Frames With Lf Bit Not Set
+//			Transmitted XGEM frames with LF bit not set: The counter aggregated across all XGEM ports of the
+//			given ONU identifies the number of fragmentation operations. (R) (mandatory) (8 byte)
+//
+//		Total Received Xgem Frames
+//			Total received XGEM frames: The counter aggregated across all XGEM ports of the given ONU. (R)
+//			(mandatory) (8 byte)
+//
+//		Received Xgem Frames With Xgem Header Hec Errors
+//			Received XGEM frames with XGEM header HEC errors: The counter aggregated across all XGEM ports
+//			of the given ONU identifies the number of XGEM frame delineation loss events. (R) (mandatory)
+//			(8 byte)
+//
+//		Fs Words Lost To Xgem Header Hec Errors
+//			FS words lost to XGEM header HEC errors: The counter of the FS frame words lost due to XGEM
+//			frame header errors that cause loss of XGEM frame delineation. (R) (mandatory) (8 byte)
+//
+//		Xgem Encryption Key Errors
+//			XGEM encryption key errors: The counter aggregated across all XGEM ports of the given ONU
+//			identifies the number of received XGEM frames that have to be discarded because of an unknown or
+//			invalid encryption key. The number is included in the Total received XGEM frame count above.
+//			(R) (mandatory) (8 byte)
+//
+//		Total Transmitted Bytes In Non_Idle Xgem Frames
+//			Total transmitted bytes in non-idle XGEM frames: The counter aggregated across all XGEM ports of
+//			the given ONU. (R) (mandatory) (8 byte)
+//
+//		Total Received Bytes In Non_Idle Xgem Frames
+//			Total received bytes in non-idle XGEM frames: The counter aggregated across all XGEM ports of
+//			the given ONU. (R) (mandatory) (8 byte)
+//
+type TwdmChannelXgemPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	twdmchannelxgemperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "TwdmChannelXgemPerformanceMonitoringHistoryData",
+		ClassID: 445,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			GetCurrentData,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFC0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData64BItId", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint64Field("TotalTransmittedXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint64Field("TransmittedXgemFramesWithLfBitNotSet", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint64Field("TotalReceivedXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint64Field("ReceivedXgemFramesWithXgemHeaderHecErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint64Field("FsWordsLostToXgemHeaderHecErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint64Field("XgemEncryptionKeyErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint64Field("TotalTransmittedBytesInNonIdleXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint64Field("TotalReceivedBytesInNonIdleXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+		},
+	}
+}
+
+// NewTwdmChannelXgemPerformanceMonitoringHistoryData (class ID 445) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewTwdmChannelXgemPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(twdmchannelxgemperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/uni-g.go b/vendor/github.com/cboling/omci/generated/uni-g.go
new file mode 100644
index 0000000..eb6fb6a
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/uni-g.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const UniGClassId ClassID = ClassID(264)
+
+var unigBME *ManagedEntityDefinition
+
+// UniG (class ID #264)
+//	This ME organizes data associated with UNIs supported by GEM. One instance of the UNI-G ME
+//	exists for each UNI supported by the ONU.
+//
+//	The ONU automatically creates or deletes instances of this ME upon the creation or deletion of a
+//	real or virtual circuit pack ME, one per port.
+//
+//	Relationships
+//		An instance of the UNI-G ME exists for each instance of a PPTP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of a PPTP. (R) (mandatory) (2 bytes)
+//
+//		Deprecated
+//			Deprecated:	This attribute is not used. It should be set to 0 by the OLT and ignored by the ONU.
+//			(R, W) (mandatory) (2 bytes)
+//
+//		Administrative State
+//			NOTE – PPTP MEs also have an administrative state attribute. The user port is unlocked only if
+//			both administrative state attributes are set to unlocked. It is recommended that this attribute
+//			not be used: that the OLT set it to 0 and that the ONU ignore it.
+//
+//		Management Capability
+//			(R) (optional) (1 byte)
+//
+//		Non_Omci Management Identifier
+//			Non-OMCI management identifier: If a PPTP can be managed either directly by the OMCI or by a non-
+//			OMCI management environment, this attribute specifies how it is in fact to be managed. This
+//			attribute is either 0 (default = OMCI management), or it is a pointer to a VEIP, which in turn
+//			links to a non-OMCI management environment. (R, W) (optional) (2 bytes)
+//
+//		Relay Agent Options
+//			2/3/4:atm/123.4567
+//
+type UniG struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	unigBME = &ManagedEntityDefinition{
+		Name:    "UniG",
+		ClassID: 264,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("Deprecated", 0, mapset.NewSetWith(Read, Write), false, false, false, true, 1),
+			2: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: ByteField("ManagementCapability", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4: Uint16Field("NonOmciManagementIdentifier", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: Uint16Field("RelayAgentOptions", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewUniG (class ID 264) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewUniG(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(unigBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/vdsl2lineconfigurationextensions3.go b/vendor/github.com/cboling/omci/generated/vdsl2lineconfigurationextensions3.go
new file mode 100644
index 0000000..b566e20
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/vdsl2lineconfigurationextensions3.go
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const Vdsl2LineConfigurationExtensions3ClassId ClassID = ClassID(410)
+
+var vdsl2lineconfigurationextensions3BME *ManagedEntityDefinition
+
+// Vdsl2LineConfigurationExtensions3 (class ID #410)
+//	This ME extends the xDSL line configuration MEs.
+//
+//	An instance of this ME is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of an xDSL UNI.////		The overall xDSL line configuration profile is modelled in several parts, all of which are
+//		associated together through a common ME ID (the client PPTP xDSL UNI part 1 has a single
+//		pointer, which refers to the entire set of line configuration parts).
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. All xDSL and
+//			VDSL2 line configuration profiles and extensions that pertain to a given PPTP xDSL UNI must
+//			share a common ME ID. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Ripolicyds
+//			RIPOLICYds: This attribute indicates which policy shall be applied to determine the triggers for
+//			re-initialization in the downstream direction. A valid range of values is given in clause
+//			7.3.1.1.12.1 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+//		Ripolicyus
+//			RIPOLICYus: This attribute indicates which policy shall be applied to determine the triggers for
+//			re-initialization in the upstream direction. A valid range of values is given in clause
+//			7.3.1.1.12.2 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+//		Reinit_Time_Thresholdds
+//			REINIT_TIME_THRESHOLDds: This attribute defines the downstream threshold for re-
+//			initialization based on SES, to be used by the VTU receiver when re-initialization policy 1 is
+//			used in the downstream direction. A valid range of values is given in clause 7.3.1.1.13.1 of [ITU-T G.997.1].
+//			(R, W) (optional) (1 byte)
+//
+//		Reinit_Time_Thresholdus
+//			REINIT_TIME_THRESHOLDus: This attribute defines the upstream threshold for re-
+//			initialization based on SES, to be used by the VTU receiver when re-initialization policy 1 is
+//			used in the upstream direction. A valid range of values is given in clause 7.3.1.1.13.2 of [ITU-T G.997.1].
+//			(R, W) (optional) (1 byte)
+//
+//		Rxrefvnsfus
+//			RXREFVNSFus: If SNRM_MODE = 4, this attribute defines the upstream receiver-referred virtual
+//			noise scaling factor. The attribute value ranges from 0 (–64.0 dBm) to 255 (+63.5 dBm) – see
+//			clause 7.3.1.7.5 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+//		Txrefvnsfds
+//			TXREFVNSFds: If SNRM_MODE = 4, this attribute defines the downstream transmitter referred
+//			virtual noise scaling factor. The attribute value ranges from 0 (–64.0 dBm) to 255 (+63.5 dBm)
+//			– see clause 7.3.1.7.6 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+//		Rtx_Modeds
+//			RTX_MODEds: This attribute controls the mode of operation of [ITU-T G.998.4] retransmission in
+//			the downstream direction. A valid range of values is given in clause 7.3.1.11 of [ITU-T
+//			G.997.1]. (R, W) (mandatory) (1 byte)
+//
+//		Rtx_Modeus
+//			RTX_MODEus: This attribute controls the mode of operation of [ITU-T G.998.4] retransmission in
+//			the upstream direction. A valid range of values is given in clause 7.3.1.11 of [ITU-T G.997.1].
+//			(R, W) (mandatory) (1 byte)
+//
+//		Leftr_Thresh
+//			LEFTR_THRESH: If retransmission is used in a given transmit direction, LEFTR_THRESH specifies
+//			the threshold for declaring a near-end 'leftr' defect. LEFTR_THRESH is equal to the integer
+//			value of this attribute multiplied by 0.01. Valid values and usage are given in clause 7.3.1.12
+//			of [ITU-T G.997.1]. (R, W) (mandatory) (1 byte)
+//
+//		Maxdelayoctet_Split Parameter Mdosplit
+//			MAXDELAYOCTET-split parameter (MDOSPLIT): This attribute defines the percentage of the
+//			MAXDELAYOCTET_ext if operating in [ITU-T G.998.4] or MAXDELAYOCTET in other cases allocated to
+//			the downstream direction. MDOSPLIT is equal to the integer value of this attribute multiplied by
+//			1%. Valid values and usage are given in clause 7.3.1.14 of [ITU-T G.997.1]. (R, W) (optional)
+//			(1 byte)
+//
+//		Attndr Method Attndr_Method
+//			ATTNDR Method (ATTNDR_METHOD): This attribute specifies the method to be used for the
+//			calculation of the ATTNDR in the downstream and upstream direction. Valid values are given in
+//			clause 7.3.1.15.1 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+//		Attndr Maxdelayoctet_Split Parameter Attndr_Mdosplit
+//			ATTNDR MAXDELAYOCTET-split parameter (ATTNDR_MDOSPLIT): This attribute defines the percentage of
+//			the MAXDELAYOCTET_ext if operating in [ITU-T G.998.4] or MAXDELAYOCTET in other cases allocated
+//			to the downstream direction to be used in the improved method for calculation of the ATTNDR. The
+//			valid values are identical to the values of the line configuration parameter MDOSPLIT. See
+//			clause 7.3.1.15.2 of [ITU-T G.997.1]. (R, W) (optional) (1 byte)
+//
+type Vdsl2LineConfigurationExtensions3 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	vdsl2lineconfigurationextensions3BME = &ManagedEntityDefinition{
+		Name:    "Vdsl2LineConfigurationExtensions3",
+		ClassID: 410,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("Ripolicyds", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 1),
+			2:  ByteField("Ripolicyus", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 2),
+			3:  ByteField("ReinitTimeThresholdds", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4:  ByteField("ReinitTimeThresholdus", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  ByteField("Rxrefvnsfus", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  ByteField("Txrefvnsfds", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  ByteField("RtxModeds", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("RtxModeus", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("LeftrThresh", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: ByteField("MaxdelayoctetSplitParameterMdosplit", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("AttndrMethodAttndrMethod", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("AttndrMaxdelayoctetSplitParameterAttndrMdosplit", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewVdsl2LineConfigurationExtensions3 (class ID 410) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewVdsl2LineConfigurationExtensions3(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(vdsl2lineconfigurationextensions3BME, params...)
+}
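+
+// The RXREFVNSFus and TXREFVNSFds attributes above map one byte onto a dBm value;
+// the stated endpoints (0 = -64.0 dBm, 255 = +63.5 dBm) imply a linear 0.5 dB step.
+// A brief conversion sketch under that assumption (the helper name is illustrative):
+//
+//	// refVnsfToDbm converts the one-byte virtual noise scaling factor to dBm.
+//	func refVnsfToDbm(v byte) float64 {
+//		return -64.0 + 0.5*float64(v)
+//	}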
diff --git a/vendor/github.com/cboling/omci/generated/version.go b/vendor/github.com/cboling/omci/generated/version.go
new file mode 100644
index 0000000..93f5a1b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/version.go
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+// This file is used to track the version(s) of code used to parse the ITU
+// document and create the generated code.
+
+type VersionInfo struct {
+	Name       string  // Type (pre-parser, parser, code-generator)
+	Version    string  // Version of parser project
+	CreateTime float32 // UTC linux time when ran
+	ItuDocName string  // ITU G.988 document name
+	SHA256     string  // ITU G.988 document SHA-256 hash
+}
+
+var Versions []VersionInfo
+
+func init() {
+	Versions = make([]VersionInfo, 0)
+
+	Versions = append(Versions,
+		VersionInfo{
+			Name:       "parser",
+			Version:    "0.9.0",
+			CreateTime: 1570304206.346817,
+			ItuDocName: "T-REC-G.988-2017-11.docx",
+			SHA256:     "96ffc8bca6f70175c8e281e87e1cf21662d07a7502ebf595c5c3180a9972b9ac",
+		})
+
+	Versions = append(Versions,
+		VersionInfo{
+			Name:       "pre-parser",
+			Version:    "0.9.0",
+			CreateTime: 1570304142.338239,
+			ItuDocName: "T-REC-G.988-2017-11.docx",
+			SHA256:     "96ffc8bca6f70175c8e281e87e1cf21662d07a7502ebf595c5c3180a9972b9ac",
+		})
+
+	Versions = append(Versions,
+		VersionInfo{
+			Name:       "code-generator",
+			Version:    "0.9.0",
+			CreateTime: 1570305842.186913,
+			ItuDocName: "T-REC-G.988-2017-11.docx",
+			SHA256:     "96ffc8bca6f70175c8e281e87e1cf21662d07a7502ebf595c5c3180a9972b9ac",
+		})
+}
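+
+// The Versions slice can be used to report which pre-parser, parser and code-generator
+// runs produced these definitions, for example when logging library provenance at
+// start-up. A brief sketch (fmt import and output format are illustrative only):
+//
+//	for _, v := range Versions {
+//		fmt.Printf("%s v%s generated from %s (sha256 %s)\n",
+//			v.Name, v.Version, v.ItuDocName, v.SHA256)
+//	}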
diff --git a/vendor/github.com/cboling/omci/generated/virtualethernetinterfacepoint.go b/vendor/github.com/cboling/omci/generated/virtualethernetinterfacepoint.go
new file mode 100644
index 0000000..b2bf4a6
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/virtualethernetinterfacepoint.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VirtualEthernetInterfacePointClassId ClassID = ClassID(329)
+
+var virtualethernetinterfacepointBME *ManagedEntityDefinition
+
+// VirtualEthernetInterfacePoint (class ID #329)
+//	This ME represents the data plane hand-off point in an ONU to a separate (non-OMCI) management
+//	domain. The VEIP is managed by the OMCI, and is potentially known to the non-OMCI management
+//	domain. One or more Ethernet traffic flows are present at this boundary.
+//
+//	Instances of this ME are automatically created and deleted by the ONU. This is necessary because
+//	the required downstream priority queues are subject to physical implementation constraints. The
+//	OLT may use one or more of the VEIPs created by the ONU.
+//
+//	It is expected that the ONU will create one VEIP for each non-OMCI management domain. At the
+//	vendor's discretion, a VEIP may be created for each traffic class.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a virtual Ethernet interface between
+//		OMCI and non-OMCI management domains.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. When used
+//			independently of a cardholder and circuit pack, the ONU should assign IDs in the sequence 1, 2,
+//			.... When used in conjunction with a cardholder and circuit pack, this 2 byte number indicates
+//			the physical position of the VEIP. The first byte is the slot ID (defined in clause 9.1.5). The
+//			second byte is the port ID, with the range 1..255. The values 0 and 0xFFFF are reserved. (R)
+//			(mandatory) (2 bytes)
+//
+//		Administrative State
+//			Administrative state: This attribute locks (1) and unlocks (0) the functions performed by this
+//			ME. Administrative state is further described in clause A.1.6. (R, W) (mandatory) (1 byte)
+//
+//		Operational State
+//			Operational state: This attribute indicates whether the ME is capable of performing its
+//			function. Valid values are enabled (0) and disabled (1). (R) (optional) (1 byte)
+//
+//		Interdomain Name
+//			Interdomain name: This attribute is a character string that provides an optional way to identify
+//			the VEIP to a non-OMCI management domain. The interface may also be identified by its ME ID,
+//			[b-IANA] assigned port and possibly other ways. If the vendor offers no information in this
+//			attribute, it should be set to a sequence of null bytes. (R, W) (optional) (25 bytes)
+//
+//		Tcp_Udp Pointer
+//			TCP/UDP pointer: This attribute points to an instance of the TCP/UDP config data ME, which
+//			provides for OMCI management of the non-OMCI management domain's IP connectivity. If no OMCI
+//			management of the non-OMCI domain's IP connectivity is required, this attribute may be omitted
+//			or set to its default, a null pointer. (R, W) (optional) (2 bytes)
+//
+//		Iana Assigned Port
+//			IANA assigned port: This attribute contains the TCP or UDP port value as assigned by [b-IANA]
+//			for the management protocol associated with this virtual Ethernet interface. This attribute is
+//			to be regarded as a hint, not as a requirement that management communications use this port; the
+//			actual port and protocol are specified in the associated TCP/UDP config data ME. If no port has
+//			been assigned or if the management protocol is free to be chosen at run-time, this attribute
+//			should be set to 0xFFFF. (R) (mandatory) (2 bytes)
+//
+type VirtualEthernetInterfacePoint struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	virtualethernetinterfacepointBME = &ManagedEntityDefinition{
+		Name:    "VirtualEthernetInterfacePoint",
+		ClassID: 329,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("AdministrativeState", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2: ByteField("OperationalState", 0, mapset.NewSetWith(Read), true, false, true, false, 2),
+			3: MultiByteField("InterdomainName", 25, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4: Uint16Field("TcpUdpPointer", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5: Uint16Field("IanaAssignedPort", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewVirtualEthernetInterfacePoint (class ID 329) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewVirtualEthernetInterfacePoint(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(virtualethernetinterfacepointBME, params...)
+}
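+
+// The Interdomain name attribute above is a fixed 25-byte field that is all null
+// bytes when the vendor offers no information. A small sketch of preparing such a
+// value, assuming shorter names are null-padded to the full width (that padding rule
+// is an assumption here, not something this file states):
+//
+//	name := []byte("mgmt-domain-1") // illustrative identifier
+//	padded := make([]byte, 25)
+//	copy(padded, name) // remaining bytes stay 0x00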
diff --git a/vendor/github.com/cboling/omci/generated/vlantaggingfilterdata.go b/vendor/github.com/cboling/omci/generated/vlantaggingfilterdata.go
new file mode 100644
index 0000000..209517f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/vlantaggingfilterdata.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VlanTaggingFilterDataClassId ClassID = ClassID(84)
+
+var vlantaggingfilterdataBME *ManagedEntityDefinition
+
+// VlanTaggingFilterData (class ID #84)
+//	This ME organizes data associated with VLAN tagging. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of a MAC bridge port configuration data
+//		ME. By definition, tag filtering occurs closer to the MAC bridge than the tagging operation.
+//		Schematically, the ordering of the functions is as given in Figure 9.3.11-1:////		Figure 9.3.11-1
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the MAC bridge port configuration
+//			data ME. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Vlan Filter List
+//			VLAN filter list: This attribute is a list of provisioned tag control information (TCI) values
+//			for the bridge port. A TCI, comprising user priority, canonical format indicator (CFI) and
+//			virtual local area network identifier (VID), is represented by 2 bytes. This attribute supports
+//			up to 12 VLAN entries. The first N are valid, where N is given by the number of entries
+//			attribute. (R, W, setbycreate) (mandatory) (24 bytes)
+//
+//		Forward Operation
+//			Table 9.3.11-1 and the actions listed are discussed in detail in the following.
+//
+//		Number Of Entries
+//			Number of entries: This attribute specifies the number of valid entries in the VLAN filter list.
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type VlanTaggingFilterData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	vlantaggingfilterdataBME = &ManagedEntityDefinition{
+		Name:    "VlanTaggingFilterData",
+		ClassID: 84,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XE000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: MultiByteField("VlanFilterList", 24, nil, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("ForwardOperation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("NumberOfEntries", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+		},
+	}
+}
+
+// NewVlanTaggingFilterData (class ID 84) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewVlanTaggingFilterData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(vlantaggingfilterdataBME, params...)
+}
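+
+// Each entry in the VLAN filter list attribute above is a 2-byte TCI: 3 bits of user
+// priority, 1 CFI bit and a 12-bit VID (the standard IEEE 802.1Q layout). A sketch of
+// packing up to 12 entries into the 24-byte attribute value follows; the helper name
+// and the zero-fill of unused entries are illustrative assumptions, and the Number of
+// entries attribute would be set to the count of valid entries separately.
+//
+//	func packVlanFilterList(vids []uint16, priority, cfi uint16) []byte {
+//		buf := make([]byte, 24) // 12 entries x 2 bytes; unused entries remain zero
+//		for i, vid := range vids {
+//			if i >= 12 {
+//				break
+//			}
+//			tci := priority<<13 | cfi<<12 | (vid & 0x0FFF)
+//			buf[2*i] = byte(tci >> 8) // TCI is big-endian on the wire
+//			buf[2*i+1] = byte(tci)
+//		}
+//		return buf
+//	}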
diff --git a/vendor/github.com/cboling/omci/generated/vlantaggingoperationconfigurationdata.go b/vendor/github.com/cboling/omci/generated/vlantaggingoperationconfigurationdata.go
new file mode 100644
index 0000000..def7943
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/vlantaggingoperationconfigurationdata.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VlanTaggingOperationConfigurationDataClassId ClassID = ClassID(78)
+
+var vlantaggingoperationconfigurationdataBME *ManagedEntityDefinition
+
+// VlanTaggingOperationConfigurationData (class ID #78)
+//	This ME organizes data associated with VLAN tagging. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	NOTE 1 – The extended VLAN tagging operation configuration data of clause 9.3.13 is preferred
+//	for new implementations.
+//
+//	Relationships
+//		Zero or one instance of this ME may exist for an instance of any ME that can terminate or modify
+//		an Ethernet stream.////		When this ME is associated with a UNI-side TP, it performs its upstream classification and
+//		tagging operations before offering the upstream frame to other filtering, bridging or switching
+//		functions. In the downstream direction, the defined inverse operation is the last operation
+//		performed on the frame before offering it to the UNI-side termination.////		When this ME is associated with an ANI-side TP, it performs its upstream classification and
+//		tagging operations as the last step before queueing for transmission to the OLT, after having
+//		received the upstream frame from other filtering, bridging or switching functions. In the
+//		downstream direction, the defined inverse operation is the first operation performed on the
+//		frame before offering it to possible filter, bridge or switch functions.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. When the
+//			optional association type attribute is 0 or undefined, this attribute's value is the same as the
+//			ID of the ME with which this VLAN tagging operation configuration data instance is associated,
+//			which may be either a PPTP Ethernet UNI or an IP host config data or an IPv6 host config data
+//			ME. Otherwise, the value of the ME ID is unconstrained except by the need to be unique. (R, set-
+//			by-create) (mandatory) (2 bytes)
+//
+//		Upstream Vlan Tagging Operation Mode
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Upstream Vlan Tag Tci Value
+//			Upstream VLAN tag TCI value: This attribute specifies the TCI for upstream VLAN tagging. It is
+//			used when the upstream VLAN tagging operation mode is 1 or 2. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Downstream Vlan Tagging Operation Mode
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Association Type
+//			The associated ME instance is identified by the associated ME pointer. (R, W, setbycreate)
+//			(optional) (1 byte)
+//
+//		Associated Me Pointer
+//			NOTE 3 – When the association type is xDSL, the two MSBs may be used to indicate a bearer
+//			channel.
+//
+type VlanTaggingOperationConfigurationData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	vlantaggingoperationconfigurationdataBME = &ManagedEntityDefinition{
+		Name:    "VlanTaggingOperationConfigurationData",
+		ClassID: 78,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("UpstreamVlanTaggingOperationMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("UpstreamVlanTagTciValue", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("DownstreamVlanTaggingOperationMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: ByteField("AssociationType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5: Uint16Field("AssociatedMePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+		},
+	}
+}
+
+// NewVlanTaggingOperationConfigurationData (class ID 78) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVlanTaggingOperationConfigurationData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(vlantaggingoperationconfigurationdataBME, params...)
+}
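
The New* constructors in these generated files all share the signature shown above: they accept optional ParamData and return a *ManagedEntity plus OmciErrors. A minimal consumer-side sketch, assuming ParamData exposes EntityID and Attributes fields keyed by the attribute names in the table (as in later releases of this generator's output; verify against the vendored types before relying on it):

package main

import (
	me "github.com/cboling/omci/generated"
)

func main() {
	// Hypothetical instance ID and attribute values, for illustration only.
	instance, omciErr := me.NewVlanTaggingOperationConfigurationData(me.ParamData{
		EntityID: 0x0101,
		Attributes: me.AttributeValueMap{
			"UpstreamVlanTaggingOperationMode": byte(1),
			"UpstreamVlanTagTciValue":          uint16(0x0064),
		},
	})
	// OmciErrors may report success through a status code rather than a nil
	// value; consult its definition before assuming a plain nil check.
	_ = instance
	_ = omciErr
}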
diff --git a/vendor/github.com/cboling/omci/generated/voiceserviceprofile.go b/vendor/github.com/cboling/omci/generated/voiceserviceprofile.go
new file mode 100644
index 0000000..8ababd1
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voiceserviceprofile.go
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoiceServiceProfileClassId ClassID = ClassID(58)
+
+var voiceserviceprofileBME *ManagedEntityDefinition
+
+// VoiceServiceProfile (class ID #58)
+//	This ME organizes data that describe the voice service functions of the ONU. Instances of this
+//	ME are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of a VoIP voice CTP by way
+//		of a VoIP media profile.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Announcement Type
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Jitter Target
+//			Jitter target:	This attribute specifies the target value of the jitter buffer in milliseconds.
+//			The system tries to maintain the jitter buffer at the target value. The value 0 specifies
+//			dynamic jitter buffer sizing. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Jitter Buffer Max
+//			Jitter buffer max: This attribute specifies the maximum depth of the jitter buffer associated
+//			with this service in milliseconds. The value 0 specifies that the ONU uses its internal default.
+//			(R, W, set-by-create) (optional) (2 bytes)
+//
+//		Echo Cancel Ind
+//			Echo cancel ind: The Boolean value true specifies that echo cancellation is on; false specifies
+//			off. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Pstn Protocol Variant
+//			PSTN protocol variant: This attribute controls which variant of POTS signalling is used on the
+//			associated UNIs. Its value is equal to the [ITU-T E.164] country code. The value 0 specifies
+//			that the ONU uses its internal default. (R, W, set-by-create) (optional) (2 bytes)
+//
+//		Dtmf Digit Levels
+//			DTMF digit levels: This attribute specifies the power level of DTMF digits that may be generated
+//			by the ONU towards the subscriber set. It is a 2s complement value referred to 1 mW at the 0
+//			transmission level point (TLP) (dBm0), with resolution 1 dB. The default value 0x8000 selects
+//			the ONU's internal policy. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Dtmf Digit Duration
+//			DTMF digit duration: This attribute specifies the duration of DTMF digits that may be generated
+//			by the ONU towards the subscriber set. It is specified in milliseconds. The default value 0
+//			selects the ONU's internal policy. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Hook Flash Minimum Time
+//			Hook flash minimum time: This attribute defines the minimum duration recognized by the ONU as a
+//			switchhook flash. It is expressed in milliseconds; the default value 0 selects the ONU's
+//			internal policy. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Hook Flash Maximum Time
+//			Hook flash maximum time: This attribute defines the maximum duration recognized by the ONU as a
+//			switchhook flash. It is expressed in milliseconds; the default value 0 selects the ONU's
+//			internal policy. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Tone Pattern Table
+//			(R, W) (optional) (N * 20 bytes)
+//
+//		Tone Event Table
+//			(R, W) (optional) (N * 7 bytes).
+//
+//		Ringing Pattern Table
+//			(R, W) (optional) (N * 5 bytes).
+//
+//		Ringing Event Table
+//			(R, W) (optional) (N * 7 bytes).
+//
+//		Network Specific Extensions Pointer
+//			Network specific extensions pointer: This attribute points to a network address ME that contains
+//			the path and name of a file containing network specific parameters for the associated UNIs. The
+//			default value for this attribute is 0xFFFF, a null pointer. (R, W, set-by-create) (optional)
+//			(2 bytes)
+//
+type VoiceServiceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voiceserviceprofileBME = &ManagedEntityDefinition{
+		Name:    "VoiceServiceProfile",
+		ClassID: 58,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFC,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("AnnouncementType", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("JitterTarget", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+			3:  Uint16Field("JitterBufferMax", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4:  ByteField("EchoCancelInd", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint16Field("PstnProtocolVariant", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6:  Uint16Field("DtmfDigitLevels", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 6),
+			7:  Uint16Field("DtmfDigitDuration", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8:  Uint16Field("HookFlashMinimumTime", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 8),
+			9:  Uint16Field("HookFlashMaximumTime", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 9),
+			10: MultiByteField("TonePatternTable", 20, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: MultiByteField("ToneEventTable", 7, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: MultiByteField("RingingPatternTable", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: MultiByteField("RingingEventTable", 7, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: Uint16Field("NetworkSpecificExtensionsPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 14),
+		},
+	}
+}
+
+// NewVoiceServiceProfile (class ID 58) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoiceServiceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voiceserviceprofileBME, params...)
+}
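
The Dtmf Digit Levels attribute above is a 2s-complement dBm0 value (1 dB resolution), with 0x8000 reserved to mean "use the ONU's internal policy". A small illustrative sketch of that encoding (editorial, not library code):

package main

import "fmt"

// encodeDtmfLevel/decodeDtmfLevel show the 2s-complement dBm0 encoding
// described above; 0x8000 remains the sentinel for "ONU internal policy".
func encodeDtmfLevel(dBm0 int16) uint16 { return uint16(dBm0) }
func decodeDtmfLevel(v uint16) int16    { return int16(v) }

func main() {
	fmt.Printf("0x%04X\n", encodeDtmfLevel(-10)) // -10 dBm0 encodes as 0xFFF6
	fmt.Println(decodeDtmfLevel(0xFFF6))         // decodes back to -10
}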
diff --git a/vendor/github.com/cboling/omci/generated/voipapplicationserviceprofile.go b/vendor/github.com/cboling/omci/generated/voipapplicationserviceprofile.go
new file mode 100644
index 0000000..4519b6b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voipapplicationserviceprofile.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipApplicationServiceProfileClassId ClassID = ClassID(146)
+
+var voipapplicationserviceprofileBME *ManagedEntityDefinition
+
+// VoipApplicationServiceProfile (class ID #146)
+//	The VoIP application service profile defines attributes of calling features used in conjunction
+//	with a VoIP line service. It is optional for ONUs that support VoIP services. If a non-OMCI
+//	interface is used to manage SIP for VoIP, this ME is unnecessary.
+//
+//	An instance of this ME is created and deleted by the OLT. A VoIP application service profile
+//	instance is needed for each unique set of profile attributes.
+//
+//	Relationships
+//		An instance of this ME is associated with zero or more SIP user data MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Cid Features
+//			The recommended default value is 0x00. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Call Waiting Features
+//			The recommended default value is 0x00. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Call Progress Or Transfer Features
+//			The recommended default value is 0x0000. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Call Presentation Features
+//			The recommended default value is 0x0000. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Direct Connect Feature
+//			The recommended default value is 0x00. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Direct Connect Uri Pointer
+//			Direct connect URI pointer: This attribute points to a network address ME that specifies the URI
+//			of the direct connect. If this attribute is set to a null pointer, no URI is defined. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Bridged Line Agent Uri Pointer
+//			Bridged line agent URI pointer: This attribute points to a network address ME that specifies the
+//			URI of the bridged line agent. If this attribute is set to a null pointer, no URI is defined.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Conference Factory Uri Pointer
+//			Conference factory URI pointer: This attribute points to a network address ME that specifies the
+//			URI of the conference factory. If this attribute is set to a null pointer, no URI is defined.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Dial Tone Feature Delay_ W Armline Timer New
+//			Dial tone feature delay/warmline timer (new): This attribute defines the warmline timer/dial
+//			tone feature delay timer (seconds). The default value 0 specifies vendor-specific
+//			implementation. (R, W) (optional) (2 bytes)
+//
+type VoipApplicationServiceProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voipapplicationserviceprofileBME = &ManagedEntityDefinition{
+		Name:    "VoipApplicationServiceProfile",
+		ClassID: 146,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("CidFeatures", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: ByteField("CallWaitingFeatures", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("CallProgressOrTransferFeatures", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("CallPresentationFeatures", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5: ByteField("DirectConnectFeature", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6: Uint16Field("DirectConnectUriPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7: Uint16Field("BridgedLineAgentUriPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8: Uint16Field("ConferenceFactoryUriPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9: Uint16Field("DialToneFeatureDelayWArmlineTimerNew", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+		},
+	}
+}
+
+// NewVoipApplicationServiceProfile (class ID 146) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipApplicationServiceProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voipapplicationserviceprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/voipconfigdata.go b/vendor/github.com/cboling/omci/generated/voipconfigdata.go
new file mode 100644
index 0000000..9dd9e77
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voipconfigdata.go
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipConfigDataClassId ClassID = ClassID(138)
+
+var voipconfigdataBME *ManagedEntityDefinition
+
+// VoipConfigData (class ID #138)
+//	The VoIP configuration data ME defines the configuration for VoIP in the ONU. The OLT uses this
+//	ME to discover the VoIP signalling protocols and configuration methods supported by this ONU.
+//	The OLT then uses this ME to select the desired signalling protocol and configuration method.
+//	The entity is conditionally required for ONUs that offer VoIP services.
+//
+//	An ONU that supports VoIP services automatically creates an instance of this ME.
+//
+//	Relationships
+//		One instance of this ME is associated with the ONU.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. There is only
+//			one instance, number 0. (R) (mandatory) (2 bytes)
+//
+//		Available Signalling Protocols
+//			(R) (mandatory) (1 byte)
+//
+//		Signalling Protocol Used
+//			(R, W) (mandatory) (1 byte)
+//
+//		Available Voip Configuration Methods
+//			Bits 5..24 are reserved by ITU-T. Bits 25..32 are reserved for proprietary vendor configuration
+//			capabilities. (R) (mandatory) (4 bytes)
+//
+//		Voip Configuration Method Used
+//			(R, W) (mandatory) (1 byte)
+//
+//		Voip Configuration Address Pointer
+//			The default value is 0xFFFF. (R, W) (mandatory) (2 bytes)
+//
+//		Voip Configuration State
+//			Other values are reserved. At ME instantiation, the ONU sets this attribute to 0. (R)
+//			(mandatory) (1 byte)
+//
+//		Retrieve Profile
+//			Retrieve profile: This attribute provides a means by which the ONU may be notified that a new
+//			VoIP profile should be retrieved. By setting this attribute, the OLT triggers the ONU to
+//			retrieve a new profile. The actual value in the set action is ignored because it is the action
+//			of setting that is important. (W) (mandatory) (1 byte)
+//
+//		Profile Version
+//			Profile version: This attribute is a character string that identifies the version of the last
+//			retrieved profile. (R) (mandatory) (25 bytes)
+//
+type VoipConfigData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voipconfigdataBME = &ManagedEntityDefinition{
+		Name:    "VoipConfigData",
+		ClassID: 138,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("AvailableSignallingProtocols", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("SignallingProtocolUsed", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: Uint32Field("AvailableVoipConfigurationMethods", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: ByteField("VoipConfigurationMethodUsed", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: Uint16Field("VoipConfigurationAddressPointer", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6: ByteField("VoipConfigurationState", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: ByteField("RetrieveProfile", 0, mapset.NewSetWith(Write), false, false, false, false, 7),
+			8: MultiByteField("ProfileVersion", 25, nil, mapset.NewSetWith(Read), true, false, false, false, 8),
+		},
+	}
+}
+
+// NewVoipConfigData (class ID 138) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipConfigData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voipconfigdataBME, params...)
+}
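
Because the ONU creates the VoIP configuration data instance itself, the MessageTypes set above carries only Get and Set. An illustrative in-package fragment (not part of the vendored file) showing how that set, a golang-set value, could be queried:

// Illustrative fragment for package generated: reports whether this
// ONU-created ME supports a given OMCI message type.
func voipConfigDataSupports(msgType interface{}) bool {
	// The set built in init() holds Get and Set only, so Create and
	// Delete report false here.
	return voipconfigdataBME.MessageTypes.Contains(msgType)
}

For example, voipConfigDataSupports(Get) would report true while voipConfigDataSupports(Create) would report false, matching the fact that the OLT can neither create nor delete this instance.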
diff --git a/vendor/github.com/cboling/omci/generated/voipfeatureaccesscodes.go b/vendor/github.com/cboling/omci/generated/voipfeatureaccesscodes.go
new file mode 100644
index 0000000..5b50616
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voipfeatureaccesscodes.go
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipFeatureAccessCodesClassId ClassID = ClassID(147)
+
+var voipfeatureaccesscodesBME *ManagedEntityDefinition
+
+// VoipFeatureAccessCodes (class ID #147)
+//	The VoIP feature access codes ME defines administrable feature access codes for the VoIP
+//	subscriber. It is optional for ONUs that support VoIP services. If a non-OMCI interface is used
+//	to manage VoIP signalling, this ME is unnecessary.
+//
+//	Instances of this ME are created and deleted by the OLT. A VoIP feature access codes instance is
+//	needed for each unique set of feature access code attributes.
+//
+//	Relationships
+//		An instance of this ME may be associated with one or more SIP user data MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R) (mandatory)
+//			(2 bytes)
+//
+//		Cancel Call Waiting
+//			Cancel call waiting:	(R, W) (optional) (5 bytes)
+//
+//		Call Hold
+//			Call hold:		(R, W) (optional) (5 bytes)
+//
+//		Call Park
+//			Call park:		(R, W) (optional) (5 bytes)
+//
+//		Caller Id Activate
+//			Caller ID activate:	(R, W) (optional) (5 bytes)
+//
+//		Caller Id Deactivate
+//			Caller ID deactivate:	(R, W) (optional) (5 bytes)
+//
+//		Do Not Disturb Activation
+//			Do not disturb activation:	(R, W) (optional) (5 bytes)
+//
+//		Do Not Disturb Deactivation
+//			Do not disturb deactivation:	(R, W) (optional) (5 bytes)
+//
+//		Do Not Disturb Pin Change
+//			Do not disturb PIN change:	(R, W) (optional) (5 bytes)
+//
+//		Emergency Service Number
+//			Emergency service number:	(R, W) (optional) (5 bytes)
+//
+//		Intercom Service
+//			Intercom service:	(R, W) (optional) (5 bytes)
+//
+//		Unattended_Blind Call Transfer
+//			Unattended/blind call transfer:	(R, W) (optional) (5 bytes)
+//
+//		Attended Call Transfer
+//			Attended call transfer:	(R, W) (optional) (5 bytes)
+//
+type VoipFeatureAccessCodes struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voipfeatureaccesscodesBME = &ManagedEntityDefinition{
+		Name:    "VoipFeatureAccessCodes",
+		ClassID: 147,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  MultiByteField("CancelCallWaiting", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 1),
+			2:  MultiByteField("CallHold", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 2),
+			3:  MultiByteField("CallPark", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 3),
+			4:  MultiByteField("CallerIdActivate", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 4),
+			5:  MultiByteField("CallerIdDeactivate", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 5),
+			6:  MultiByteField("DoNotDisturbActivation", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 6),
+			7:  MultiByteField("DoNotDisturbDeactivation", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 7),
+			8:  MultiByteField("DoNotDisturbPinChange", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 8),
+			9:  MultiByteField("EmergencyServiceNumber", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: MultiByteField("IntercomService", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: MultiByteField("UnattendedBlindCallTransfer", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: MultiByteField("AttendedCallTransfer", 5, nil, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewVoipFeatureAccessCodes (class ID 147) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipFeatureAccessCodes(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voipfeatureaccesscodesBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/voiplinestatus.go b/vendor/github.com/cboling/omci/generated/voiplinestatus.go
new file mode 100644
index 0000000..9ed609e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voiplinestatus.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipLineStatusClassId ClassID = ClassID(141)
+
+var voiplinestatusBME *ManagedEntityDefinition
+
+// VoipLineStatus (class ID #141)
+//	The VoIP line status ME contains line status information for POTS ports using VoIP services. An
+//	ONU that supports VoIP automatically creates or deletes an instance of this ME upon creation or
+//	deletion of a PPTP POTS UNI.
+//
+//	Relationships
+//		An instance of this ME is associated with a PPTP POTS UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP POTS UNI. (R) (mandatory)
+//			(2 bytes)
+//
+//		Voip Codec Used
+//			(R) (mandatory) (2 bytes)
+//
+//		Voip Voice Server Status
+//			(R) (mandatory) (1 byte)
+//
+//		Voip Port Session Type
+//			(R) (mandatory) (1 byte)
+//
+//		Voip Call 1 Packet Period
+//			Voip call 1 packet period: This attribute reports the packet period for the first call on the
+//			VoIP POTS port. The value is defined in milliseconds. (R) (mandatory) (2 bytes)
+//
+//		Voip Call 2 Packet Period
+//			Voip call 2 packet period: This attribute reports the packet period for the second call on the
+//			VoIP POTS port. The value is defined in milliseconds. (R) (mandatory) (2 bytes)
+//
+//		Voip Call 1 Dest Addr
+//			Voip call 1 dest addr: This attribute reports the DA for the first call on the VoIP POTS port.
+//			The value is an ASCII string. (R) (mandatory) (25 bytes)
+//
+//		Voip Call 2 Dest Addr
+//			Voip call 2 dest addr: This attribute reports the DA for the second call on the VoIP POTS port.
+//			The value is an ASCII string. (R) (mandatory) (25 bytes)
+//
+//		Voip Line State
+//			(R) (optional) (1 byte)
+//
+//		Emergency Call Status
+//			(R) (Optional) (1 byte)
+//
+type VoipLineStatus struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voiplinestatusBME = &ManagedEntityDefinition{
+		Name:    "VoipLineStatus",
+		ClassID: 141,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFF80,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: Uint16Field("VoipCodecUsed", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("VoipVoiceServerStatus", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: ByteField("VoipPortSessionType", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint16Field("VoipCall1PacketPeriod", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint16Field("VoipCall2PacketPeriod", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: MultiByteField("VoipCall1DestAddr", 25, nil, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: MultiByteField("VoipCall2DestAddr", 25, nil, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: ByteField("VoipLineState", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9: ByteField("EmergencyCallStatus", 0, mapset.NewSetWith(Read), true, false, true, false, 9),
+		},
+	}
+}
+
+// NewVoipLineStatus (class ID 141) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipLineStatus(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voiplinestatusBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/voipmediaprofile.go b/vendor/github.com/cboling/omci/generated/voipmediaprofile.go
new file mode 100644
index 0000000..c25179e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voipmediaprofile.go
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipMediaProfileClassId ClassID = ClassID(142)
+
+var voipmediaprofileBME *ManagedEntityDefinition
+
+// VoipMediaProfile (class ID #142)
+//	The VoIP media profile ME contains settings that apply to VoIP voice encoding. This entity is
+//	conditionally required for ONUs that offer VoIP services. If a non-OMCI interface is used to
+//	manage VoIP signalling, this ME is unnecessary.
+//
+//	An instance of this ME is created and deleted by the OLT. A VoIP media profile is needed for
+//	each unique set of profile attributes.
+//
+//	Relationships
+//		An instance of this ME may be associated with one or more VoIP voice CTP MEs.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Fax Mode
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Voice Service Profile Pointer
+//			Voice service profile pointer: Pointer to a voice service profile, which defines parameters such
+//			as jitter buffering and echo cancellation. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Codec Selection 1st Order
+//			(R, W, set-by-create) (mandatory) (1 byte)
+//
+//		Packet Period Selection 1st Order
+//			Packet period selection (1st order): This attribute specifies the packet period selection
+//			interval in milliseconds. The recommended default value is 10 ms. Valid values are 10..30 ms.
+//			(R, W, set-by-create) (mandatory) (1 byte)
+//
+//		Silence Suppression 1st Order
+//			Silence suppression (1st order): This attribute specifies whether silence suppression is on or
+//			off. Valid values are 0 = off and 1 = on. (R, W, set-by-create) (mandatory) (1 byte)
+//
+//		Codec Selection 2nd Order
+//			Codec selection (2nd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Packet Period Selection 2nd Order
+//			Packet period selection (2nd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Silence Suppression 2nd Order
+//			Silence suppression (2nd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Codec Selection 3rd Order
+//			Codec selection (3rd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Packet Period Selection 3rd Order
+//			Packet period selection (3rd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Silence Suppression 3rd Order
+//			Silence suppression (3rd order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Codec Selection 4th Order
+//			Codec selection (4th order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Packet Period Selection 4th Order
+//			Packet period selection (4th order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Silence Suppression 4th Order
+//			Silence suppression (4th order):	(R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Oob Dtmf
+//			OOB DTMF:	This attribute specifies out-of-band DTMF carriage. When enabled (1), DTMF signals are
+//			carried out of band via RTP or the associated signalling protocol. When disabled (0), DTMF tones
+//			are carried in the PCM stream. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Rtp Profile Pointer
+//			RTP profile pointer: This attribute points to the associated RTP profile data ME. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+type VoipMediaProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voipmediaprofileBME = &ManagedEntityDefinition{
+		Name:    "VoipMediaProfile",
+		ClassID: 142,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("FaxMode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint16Field("VoiceServiceProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("CodecSelection1StOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  ByteField("PacketPeriodSelection1StOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  ByteField("SilenceSuppression1StOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  ByteField("CodecSelection2NdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  ByteField("PacketPeriodSelection2NdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 7),
+			8:  ByteField("SilenceSuppression2NdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("CodecSelection3RdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: ByteField("PacketPeriodSelection3RdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: ByteField("SilenceSuppression3RdOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 11),
+			12: ByteField("CodecSelection4ThOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 12),
+			13: ByteField("PacketPeriodSelection4ThOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 13),
+			14: ByteField("SilenceSuppression4ThOrder", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 14),
+			15: ByteField("OobDtmf", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 15),
+			16: Uint16Field("RtpProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewVoipMediaProfile (class ID 142) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipMediaProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voipmediaprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/voipvoicectp.go b/vendor/github.com/cboling/omci/generated/voipvoicectp.go
new file mode 100644
index 0000000..08060e2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/voipvoicectp.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VoipVoiceCtpClassId ClassID = ClassID(139)
+
+var voipvoicectpBME *ManagedEntityDefinition
+
+// VoipVoiceCtp (class ID #139)
+//	The VoIP voice CTP defines the attributes necessary to associate a specified VoIP service (SIP,
+//	ITU-T H.248) with a POTS UNI. This entity is conditionally required for ONUs that offer VoIP
+//	services. If a non-OMCI interface is used to manage VoIP signalling, this ME is unnecessary.
+//
+//	An instance of this ME is created and deleted by the OLT. A VoIP voice CTP ME is needed for each
+//	PPTP POTS UNI served by VoIP.
+//
+//	Relationships
+//		An instance of this ME links a PPTP POTS UNI ME with a VoIP media profile and a SIP user data or
+//		media gateway controller (MGC) config data ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		User Protocol Pointer
+//			User protocol pointer: This attribute points to signalling protocol data. If the signalling
+//			protocol used attribute of the VoIP config data ME specifies that the ONU's signalling protocol
+//			is SIP, this attribute points to a SIP user data ME, which in turn points to a SIP agent config
+//			data ME. If the signalling protocol is ITU-T H.248, this attribute points directly to an MGC
+//			config data ME. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Pptp Pointer
+//			PPTP pointer: This attribute points to the PPTP POTS UNI ME that serves the analogue telephone
+//			port. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		V O Ip Media Profile Pointer
+//			VoIP media profile pointer: This attribute points to an associated VoIP media profile. (R, W,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Signalling Code
+//			(R, W, setbycreate) (mandatory) (1 byte)
+//
+type VoipVoiceCtp struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	voipvoicectpBME = &ManagedEntityDefinition{
+		Name:    "VoipVoiceCtp",
+		ClassID: 139,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("UserProtocolPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("PptpPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("VOIpMediaProfilePointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: ByteField("SignallingCode", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+		},
+	}
+}
+
+// NewVoipVoiceCtp (class ID 139) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVoipVoiceCtp(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(voipvoicectpBME, params...)
+}
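
The three pointer attributes above are what tie a POTS port into the VoIP service model: the CTP references the PPTP POTS UNI, the VoIP media profile, and either a SIP user data or an MGC config data ME. Continuing the consumer-side sketch shown earlier (same import alias and the same assumed ParamData/AttributeValueMap shape; entity IDs are invented for illustration):

// Hypothetical wiring of one analogue port into the VoIP model.
ctp, omciErr := me.NewVoipVoiceCtp(me.ParamData{
	EntityID: 0x0101, // often mirrors the served PPTP POTS UNI instance
	Attributes: me.AttributeValueMap{
		"UserProtocolPointer":     uint16(0x0001), // SIP user data or MGC config data ME
		"PptpPointer":             uint16(0x0101), // PPTP POTS UNI
		"VOIpMediaProfilePointer": uint16(0x0001), // VoIP media profile
		"SignallingCode":          byte(1),
	},
})
_, _ = ctp, omciErr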
diff --git a/vendor/github.com/cboling/omci/generated/vpnetworkctp.go b/vendor/github.com/cboling/omci/generated/vpnetworkctp.go
new file mode 100644
index 0000000..8950e2f
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/vpnetworkctp.go
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VpNetworkCtpClassId ClassID = ClassID(269)
+
+var vpnetworkctpBME *ManagedEntityDefinition
+
+// VpNetworkCtp (class ID #269)
+//	NOTE – In [ITU-T G.984.4], this ME is called VP network CTP-G.
+//
+//	This ME represents the termination of VP links on an ONU. It aggregates connectivity
+//	functionality from the network view and alarms from the network element view as well as
+//	artefacts from trails. Instances of this ME are created and deleted by the OLT.
+//
+//	An instance of the VP network CTP ME can be deleted only when no ATM IW VCC TP is associated
+//	with it. It is the responsibility of the OLT to ensure that this condition is met.
+//
+//	Relationships
+//		Zero or more instances of the VP network CTP ME may exist for each instance of the IW VCC TP ME.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. (R, setbycreate)
+//			(mandatory) (2 bytes)
+//
+//		Vpi Value
+//			VPI value:	This attribute identifies the VPI value associated with the VP link being terminated.
+//			(R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Uni Pointer
+//			UNI pointer: This pointer indicates the xDSL PPTP UNI associated with this VP TP. The bearer
+//			channel may be indicated by the two MSBs of the pointer. (R, W, setbycreate) (mandatory)
+//			(2 bytes)
+//
+//		Direction
+//			Direction:	This attribute specifies whether the VP link is used for UNI-to-ANI (value 1), ANI-
+//			to-UNI (value 2), or bidirectional (value 3) connection. (R, W, setbycreate) (mandatory)
+//			(1 byte)
+//
+//		Deprecated 1
+//			Deprecated 1: Not used; should be set to 0. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Deprecated 2
+//			Deprecated 2: Not used; should be set to 0. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Deprecated 3
+//			Deprecated 3: Not used; should be set to 0. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Deprecated 4
+//			Deprecated 4: Not used; if present, should be set to 0. (R) (optional) (1 byte)
+//
+type VpNetworkCtp struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	vpnetworkctpBME = &ManagedEntityDefinition{
+		Name:    "VpNetworkCtp",
+		ClassID: 269,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFE00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint16Field("VpiValue", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: Uint16Field("UniPointer", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: ByteField("Direction", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4: Uint16Field("Deprecated1", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, true, 4),
+			5: Uint16Field("Deprecated2", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, true, 5),
+			6: Uint16Field("Deprecated3", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, true, 6),
+			7: ByteField("Deprecated4", 0, mapset.NewSetWith(Read), false, false, true, true, 7),
+		},
+	}
+}
+
+// NewVpNetworkCtp (class ID 269) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVpNetworkCtp(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(vpnetworkctpBME, params...)
+}
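
The Direction attribute above encodes the VP link usage as 1 = UNI-to-ANI, 2 = ANI-to-UNI and 3 = bidirectional. A trivial illustrative set of constants (editorial, not part of the generated file) for readers of code that sets this attribute:

// Illustrative names for the Direction attribute values documented above.
const (
	DirectionUniToAni      byte = 1
	DirectionAniToUni      byte = 2
	DirectionBidirectional byte = 3
)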
diff --git a/vendor/github.com/cboling/omci/generated/vpperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/vpperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..72909be
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/vpperformancemonitoringhistorydata.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const VpPerformanceMonitoringHistoryDataClassId ClassID = ClassID(62)
+
+var vpperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// VpPerformanceMonitoringHistoryData (class ID #62)
+//	This ME collects PM data associated with a VP network CTP. Instances of this ME are created and
+//	deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME is associated with an instance of the VP network CTP ME. The performance
+//		of upstream ATM flows is reported.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the VP network CTP. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Lost C 0 1 Cells
+//			Lost C = 0 + 1 cells: This attribute counts all cell loss. It cannot distinguish between cells
+//			lost because of header bit errors, ATM-level header errors, cell policing, or buffer overflows.
+//			It records only loss of information, independent of the priority of the cell. (R) (mandatory)
+//			(2 bytes)
+//
+//		Lost C_= 0 Cells
+//			Lost C = 0 cells: This attribute counts loss of high priority cells. It cannot distinguish
+//			between cells lost because of header bit errors, ATM-level header errors, cell policing, or
+//			buffer overflows. It records only loss of high priority cells. (R) (mandatory) (2 bytes)
+//
+//		Misinserted Cells
+//			Misinserted cells: This attribute counts cells that are misrouted to a monitored VP. (R)
+//			(mandatory) (2 bytes)
+//
+//		Transmitted C_= 0 _ 1 Cells
+//			Transmitted C = 0 + 1 cells: This attribute counts cells originated by the transmitting end
+//			point (i.e., backward reporting is assumed). (R) (mandatory) (5 bytes)
+//
+//		Transmitted C_= 0 Cells
+//			Transmitted C = 0 cells: This attribute counts high priority cells originated by the
+//			transmitting end point (i.e., backward reporting is assumed). (R) (mandatory) (5 bytes)
+//
+//		Impaired Block
+//			Impaired blocks: This severely errored cell block counter is incremented whenever one of the
+//			following events takes place: the number of misinserted cells reaches its threshold; the number
+//			of bipolar violations reaches its threshold; or the number of lost cells reaches its threshold.
+//			Threshold values are based on vendor-operator negotiation. (R) (mandatory) (2 bytes)
+//
+type VpPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	vpperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "VpPerformanceMonitoringHistoryData",
+		ClassID: 62,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("LostC01Cells", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint16Field("LostC=0Cells", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint16Field("MisinsertedCells", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: MultiByteField("TransmittedC=01Cells", 5, nil, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: MultiByteField("TransmittedC=0Cells", 5, nil, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint16Field("ImpairedBlock", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewVpPerformanceMonitoringHistoryData (class ID 62) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire, or one that is about to be sent on the wire.
+func NewVpPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(vpperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofile.go b/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofile.go
new file mode 100644
index 0000000..d88b93e
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofile.go
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslChannelConfigurationProfileClassId ClassID = ClassID(107)
+
+var xdslchannelconfigurationprofileBME *ManagedEntityDefinition
+
+// XdslChannelConfigurationProfile (class ID #107)
+//	This ME contains the channel configuration profile for an xDSL UNI. An instance of this ME is
+//	created and deleted by the OLT.
+//
+//	NOTE – If [ITU-T G.997.1] compatibility is required, bit rates should only be set to integer
+//	multiples of 1000 bits/s. The ONU may reject attempts to set other values for bit rate
+//	attributes.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Minimum Data Rate
+//			Minimum data rate: This parameter specifies the minimum desired net data rate for the bearer
+//			channel. It is coded in bits per second. (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Maximum Data Rate
+//			Maximum data rate: This parameter specifies the maximum desired net data rate for the bearer
+//			channel. It is coded in bits per second. (R, W, setbycreate) (mandatory) (4 bytes)
+//
+//		Rate Adaptation Ratio
+//			Rate adaptation ratio: This attribute specifies the weight that should be taken into account
+//			when performing rate adaptation in the direction of the bearer channel. The attribute is defined
+//			as a percentage. The value 20, for example, means that 20% of the available data rate (in excess
+//			of the minimum data rate summed over all bearer channels) is assigned to this bearer channel and
+//			80% to the other bearer channels. The OLT must ensure that the sum of rate adaptation ratios
+//			over all bearers in one direction is 100%. (R, W, setbycreate) (optional) (1 byte)
+//
+//		Maximum Interleaving Delay
+//			The delay is coded in milliseconds, varying from 2 to 63, with special meaning assigned to
+//			values 0, 1 and 255. The value 0 indicates that no delay bound is imposed. The value 1 indicates
+//			the fast latency path is to be used in the ITUT G.992.1 operating mode and S and D are to be
+//			selected such that S ≤ 1 and D = 1 in ITU-T G.992.2, ITUT G.992.3, ITUT G.992.4, ITUT G.992.5 and
+//			ITUT G.993.2 operating modes. The value 255 indicates a delay bound of 1 ms in ITUT G.993.2
+//			operation. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Data Rate Threshold Upshift
+//			Data rate threshold upshift: This attribute is a threshold on the cumulative data rate upshift
+//			achieved over one or more bearer channel data rate adaptations. An upshift rate change (DRT up)
+//			notification is issued by the PPTP xDSL UNI part 1 when the actual data rate exceeds the data
+//			rate at the last entry into showtime by more than the threshold. The data rate threshold is
+//			coded in bits per second. (R, W, setbycreate) (mandatory for xDSL standards that use this
+//			attribute) (4 bytes)
+//
+//		Data Rate Threshold Downshift
+//			Data rate threshold downshift: This attribute is a threshold on the cumulative data rate
+//			downshift achieved over one or more bearer channel data rate adaptations. A downshift rate
+//			change (DRT down) notification is issued by the PPTP xDSL UNI part 1 when the actual data rate
+//			is below the data rate at the last entry into showtime by more than the threshold. The data rate
+//			threshold is coded in bits per second. (R, W, setbycreate) (mandatory for xDSL standards that
+//			use this attribute) (4 bytes)
+//
+//		Minimum Reserved Data Rate
+//			Minimum reserved data rate: This attribute specifies the desired minimum reserved net data rate
+//			for the bearer channel. The rate is coded in bits per second. This attribute is needed only if
+//			the rate adaptation mode is set to dynamic in the xDSL line configuration profile part 1. (R, W,
+//			setbycreate) (optional) (4 bytes)
+//
+//		Minimum Data Rate In Low _ Power State
+//			Minimum data rate in low-power state: This parameter specifies the minimum desired net data rate
+//			for the bearer channel during the low-power state (L1/L2). The power management low-power states
+//			L1 and L2 are defined in [ITUT G.992.2] and [ITUT G.992.3], respectively. The data rate is coded
+//			in bits per second. (R, W, setbycreate) (mandatory) (4 byte)
+//
+//		Minimum Impulse Noise Protection
+//			(R, W, setbycreate) (optional for [ITU-T G.992.1], mandatory for other xDSL standards that use
+//			this attribute) (1 byte)
+//
+//		Maximum Bit Error Ratio
+//			(R, W, setbycreate) (mandatory for standards that use this attribute) (1 byte)
+//
+//		Minimum Impulse Noise Protection 8_Khz
+//			Minimum impulse noise protection 8 kHz: The INPmin8 attribute specifies the minimum INP for the
+//			bearer channel if it is transported over DMT symbols with a subcarrier spacing of 8.625 kHz. It
+//			is only valid for [ITUT G.993.2]. INP is expressed in DMT symbols with a subcarrier spacing of
+//			8.625 kHz. It can take any integer value from 0 (default) to 16, inclusive. (R, W) (mandatory
+//			for [ITUT G.993.2]) (1 byte)
+//
+//		Maximum Delay Variation
+//			Maximum delay variation: The DVMAX attribute specifies the maximum value for delay variation
+//			allowed in an OLR procedure. Its value ranges from 1 (0.1 ms) to 254 (25.4 ms). The special
+//			value 255 specifies that no delay variation bound is imposed. (R, W) (optional: used by
+//			[ITUT G.993.2]) (1 byte)
+//
+//		Channel Initialization Policy Selection
+//			Channel initialization policy selection: The CIPOLICY attribute specifies the policy to
+//			determine transceiver configuration at initialization. Valid values are 0..1, as defined in the
+//			Recommendations that use this attribute. (R, W) (optional) (1 byte)
+//
+//		Minimum Sos Bit Rate Downstream
+//			Minimum SOS bit rate downstream: The MIN-SOS-BR-ds attribute specifies the minimum net data rate
+//			required for a valid SOS request in the downstream direction. The value is coded as an unsigned
+//			integer representing the data rate as a multiple of 8 kbit/s. (R, W) (optional) (4 bytes)
+//
+//		Minimum Sos Bit Rate Upstream
+//			Minimum SOS bit rate upstream: The MIN-SOS-BR-us attribute specifies the minimum net data rate
+//			required for a valid SOS request in the upstream direction. The value is coded as an unsigned
+//			integer representing the data rate as a multiple of 8 kbit/s. (R, W) (optional) (4 bytes)
+//
+type XdslChannelConfigurationProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslchannelconfigurationprofileBME = &ManagedEntityDefinition{
+		Name:    "XdslChannelConfigurationProfile",
+		ClassID: 107,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint32Field("MinimumDataRate", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  Uint32Field("MaximumDataRate", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("RateAdaptationRatio", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4:  ByteField("MaximumInterleavingDelay", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  Uint32Field("DataRateThresholdUpshift", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 5),
+			6:  Uint32Field("DataRateThresholdDownshift", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 6),
+			7:  Uint32Field("MinimumReservedDataRate", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8:  Uint32Field("MinimumDataRateInLowPowerState", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("MinimumImpulseNoiseProtection", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: ByteField("MaximumBitErrorRatio", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: ByteField("MinimumImpulseNoiseProtection8Khz", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: ByteField("MaximumDelayVariation", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 12),
+			13: ByteField("ChannelInitializationPolicySelection", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: Uint32Field("MinimumSosBitRateDownstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: Uint32Field("MinimumSosBitRateUpstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewXdslChannelConfigurationProfile (class ID 107) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewXdslChannelConfigurationProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslchannelconfigurationprofileBME, params...)
+}
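As a usage sketch for the constructor above (assuming ParamData carries an EntityID and an Attributes map, as the rest of this generated package suggests; the attribute names come from the AttributeDefinitionMap, the values are illustrative):

package main

import (
	"fmt"

	"github.com/cboling/omci/generated"
)

func main() {
	// ParamData field names (EntityID, Attributes) are assumed here; attribute
	// value types follow the field constructors above (Uint32Field -> uint32).
	me, omciErr := generated.NewXdslChannelConfigurationProfile(generated.ParamData{
		EntityID: 1,
		Attributes: generated.AttributeValueMap{
			"MinimumDataRate": uint32(64000),   // mandatory, bits per second
			"MaximumDataRate": uint32(8000000), // mandatory, bits per second
		},
	})
	// omciErr reports attribute-validation problems against the definition;
	// its exact success convention is defined elsewhere in this package.
	fmt.Println(me, omciErr)
}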
diff --git a/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofilepart2.go b/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofilepart2.go
new file mode 100644
index 0000000..cc7a5a0
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslchannelconfigurationprofilepart2.go
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslChannelConfigurationProfilePart2ClassId ClassID = ClassID(412)
+
+var xdslchannelconfigurationprofilepart2BME *ManagedEntityDefinition
+
+// XdslChannelConfigurationProfilePart2 (class ID #412)
+//	This ME contains the channel configuration profile for an xDSL UNI. An instance of this ME is
+//	created and deleted by the OLT.
+//
+//	NOTE – If [ITUT G.997.1] compatibility is required, bit rates should only be set to integer
+//	multiples of 1000 bits/s. The ONU may reject attempts to set other values for bit rate
+//	attributes.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the xDSL channel configuration
+//			profile. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Minimum Expected Throughput For Retransmission Minetr_Rtx
+//			Minimum expected throughput for retransmission (MINETR_RTX): If retransmission is used in a
+//			given transmit direction, this attribute specifies the minimum expected throughput for the
+//			bearer channel, in bits per second. See clause 7.3.2.1.8 of [ITU-T G.997.1]. (R, W) (mandatory)
+//			(4 bytes)
+//
+//		Maximum Expected Throughput For Retransmission Maxetr_Rtx
+//			Maximum expected throughput for retransmission (MAXETR_RTX): If retransmission is used in a
+//			given transmit direction, this parameter specifies the maximum expected throughput for the
+//			bearer channel, in bits per second. See clause 7.3.2.1.9 of [ITU-T G.997.1]. (R, W) (mandatory)
+//			(4 bytes)
+//
+//		Maximum Net Data Rate For Retransmission Maxndr_Rtx
+//			Maximum net data rate for retransmission (MAXNDR_RTX): If retransmission is used in a given
+//			transmit direction, this parameter specifies the maximum net data rate for the bearer channel,
+//			in bits per second. See clause 7.3.2.1.10 of [ITUT G.997.1]. (R, W) (mandatory) (4 bytes)
+//
+//		Maximum Delay For Retransmission Delaymax_Rtx
+//			Maximum delay for retransmission (DELAYMAX_RTX): If retransmission is used in a given transmit
+//			direction, this parameter specifies the maximum for the instantaneous delay due to the effect of
+//			retransmission only. This delay is defined as the integer value of this attribute multiplied by
+//			1 ms. The valid delay values are given in clause 7.3.2.11 of [ITU-T G.997.1]. (R, W) (mandatory)
+//			(1 bytes)
+//
+//		Minimum Delay For Retransmission Delaymin_Rtx
+//			Minimum delay for retransmission (DELAYMIN_RTX): If retransmission is used in a given transmit
+//			direction, this parameter specifies the minimum for the instantaneous delay due to the effect of
+//			retransmission only. This delay is defined as the integer value of this attribute multiplied by
+//			1 ms. The valid delay values are given in clause 7.3.2.12 of [ITU-T G.997.1]. (R, W) (mandatory)
+//			(1 bytes)
+//
+//		Minimum Impulse Noise Protection Against Single High Impulse Noise Event Shine For Retransmission Inpmin_Shine_Rtx
+//			Minimum impulse noise protection against single high impulse noise event (SHINE) for
+//			retransmission (INPMIN_SHINE_RTX): If retransmission is used in a given transmit direction, this
+//			parameter specifies the minimum INP against a SHINE for the bearer channel if it is transported
+//			over DMT symbols with a subcarrier spacing of 4.3125 kHz. The valid range of values is given in
+//			clause 7.3.2.13 of [ITU-T G.997.1]. (R, W) (mandatory) (1 bytes)
+//
+//		Minimum Impulse Noise Protection Against Shine For Retransmission For Systems Using 8.625 Khz Subcarrier Spacing Inpmin8_Shine_Rtx
+//			Minimum impulse noise protection against SHINE for retransmission for systems using 8.625 kHz
+//			subcarrier spacing (INPMIN8_SHINE_RTX): If retransmission is used in a given transmit direction,
+//			this parameter specifies the minimum INP against SHINE for the bearer channel if it is
+//			transported over DMT symbols with a subcarrier spacing of 8.625 kHz. The valid range of values
+//			is given in clause 7.3.2.14 of [ITUT G.997.1]. (R, W) (mandatory) (1 bytes)
+//
+//		Shineratio_Rtx
+//			SHINERATIO_RTX: If retransmission is used in a given transmit direction, this parameter
+//			specifies the SHINE ratio. This ratio is defined as the integer value of this attribute
+//			multiplied by 0.001. The valid range of values is given in clause 7.3.2.15 of [ITU-T G.997.1].
+//			(R, W) (mandatory) (1 bytes)
+//
+//		Minimum Impulse Noise Protection Against Rein For Retransmission Inpmin_Rein_Rtx
+//			Minimum impulse noise protection against REIN for retransmission (INPMIN_REIN_RTX): If
+//			retransmission is used in a given transmit direction, this parameter specifies the minimum INP
+//			against REIN for the bearer channel if it is transported over DMT symbols with a subcarrier
+//			spacing of 4.3125 kHz. The valid range of values is given in clause 7.3.2.16 of [ITU-T G.997.1].
+//			(R, W) (mandatory) (1 bytes)
+//
+//		Minimum Impulse Noise Protection Against Rein For Retransmission For Systems Using 8.625_Khz Subcarrier Spacing Inpmin8_Rein_Rtx
+//			Minimum impulse noise protection against REIN for retransmission for systems using 8.625 kHz
+//			subcarrier spacing (INPMIN8_REIN_RTX): If retransmission is used in a given transmit direction,
+//			this parameter specifies the minimum INP against REIN for the bearer channel if it is
+//			transported over DMT symbols with a subcarrier spacing of 8.625 kHz. The valid range of values
+//			is given in clause 7.3.2.17 of [ITU-T G.997.1]. (R, W) (mandatory) (1 bytes)
+//
+//		Rein Inter_Arrival Time For Retransmission Iat_Rein_Rtx
+//			REIN inter-arrival time for retransmission (IAT_REIN_RTX): If retransmission is used in a given
+//			transmit direction, this parameter specifies the IAT that shall be assumed for REIN protection.
+//			The valid range of values is given in clause 7.3.2.18 of [ITU-T G.997.1]. (R, W) (mandatory)
+//			(1 bytes)
+//
+//		Target Net Data Rate Target_Ndr
+//			Target net data rate (TARGET_NDR): If retransmission is not used in a given transmit direction,
+//			this parameter specifies the target net data of the bearer channel, in bits per second. See
+//			clause 7.3.2.19.1 of [ITU-T G.997.1]. (R, W) (mandatory) (4 bytes)
+//
+//		Target Expected Throughput For Retransmission Target_Etr
+//			Target expected throughput for retransmission (TARGET_ETR): If retransmission is used in a given
+//			transmit direction, this parameter specifies the target expected throughput for the bearer
+//			channel, in bits per second. See clause 7.3.2.19.2 of [ITUT G.997.1]. (R, W) (mandatory)
+//			(4 bytes)
+//
+type XdslChannelConfigurationProfilePart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslchannelconfigurationprofilepart2BME = &ManagedEntityDefinition{
+		Name:    "XdslChannelConfigurationProfilePart2",
+		ClassID: 412,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFF8,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint32Field("MinimumExpectedThroughputForRetransmissionMinetrRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 1),
+			2:  Uint32Field("MaximumExpectedThroughputForRetransmissionMaxetrRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3:  Uint32Field("MaximumNetDataRateForRetransmissionMaxndrRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4:  ByteField("MaximumDelayForRetransmissionDelaymaxRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5:  ByteField("MinimumDelayForRetransmissionDelayminRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  ByteField("MinimumImpulseNoiseProtectionAgainstSingleHighImpulseNoiseEventShineForRetransmissionInpminShineRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("MinimumImpulseNoiseProtectionAgainstShineForRetransmissionForSystemsUsing8625KhzSubcarrierSpacingInpmin8ShineRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 7),
+			8:  ByteField("ShineratioRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 8),
+			9:  ByteField("MinimumImpulseNoiseProtectionAgainstReinForRetransmissionInpminReinRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 9),
+			10: ByteField("MinimumImpulseNoiseProtectionAgainstReinForRetransmissionForSystemsUsing8625KhzSubcarrierSpacingInpmin8ReinRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 10),
+			11: ByteField("ReinInterArrivalTimeForRetransmissionIatReinRtx", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 11),
+			12: Uint32Field("TargetNetDataRateTargetNdr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 12),
+			13: Uint32Field("TargetExpectedThroughputForRetransmissionTargetEtr", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 13),
+		},
+	}
+}
+
+// NewXdslChannelConfigurationProfilePart2 (class ID 412) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewXdslChannelConfigurationProfilePart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslchannelconfigurationprofilepart2BME, params...)
+}
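The AllowedAttributeMask values in these definitions follow the usual OMCI convention in which attribute 1 occupies the most significant bit of a 16-bit mask, so a definition with n attributes beyond the ME ID keeps the top n bits set. A small illustrative helper (not part of the vendored package) showing that arithmetic:

package main

import "fmt"

// maskForAttributes returns a 16-bit OMCI attribute mask with bits set for
// attributes 1..n, attribute 1 being the most significant bit.
func maskForAttributes(n uint) uint16 {
	var full uint16 = 0xFFFF
	return full << (16 - n)
}

func main() {
	fmt.Printf("0x%04X\n", maskForAttributes(13)) // 0xFFF8, as for class 412 above
	fmt.Printf("0x%04X\n", maskForAttributes(15)) // 0xFFFE, as for class 107
}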
diff --git a/vendor/github.com/cboling/omci/generated/xdslchanneldownstreamstatusdata.go b/vendor/github.com/cboling/omci/generated/xdslchanneldownstreamstatusdata.go
new file mode 100644
index 0000000..77c9ba2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslchanneldownstreamstatusdata.go
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslChannelDownstreamStatusDataClassId ClassID = ClassID(102)
+
+var xdslchanneldownstreamstatusdataBME *ManagedEntityDefinition
+
+// XdslChannelDownstreamStatusData (class ID #102)
+//	This ME contains downstream channel status data for an xDSL UNI. The ONU automatically creates
+//	or deletes instances of this ME upon the creation or deletion of a PPTP xDSL UNI part 1.
+//
+//	NOTE – [ITU-T G.997.1] specifies that bit rate attributes have a granularity of 1000 bit/s. If
+//	ITU-T G.997.1 compliance is required, the ONU should only report values with this granularity.
+//
+//	Relationships
+//		One or more instances of this ME are associated with an instance of an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Actual Interleaving Delay
+//			Actual interleaving delay: This attribute is the actual one-way interleaving delay introduced by
+//			the PMS-TC between the alpha and beta reference points, excluding delay in the L1 and L2 states.
+//			In the L1 and L2 states, the attribute contains the interleaving delay in the previous L0 state.
+//			For ADSL, this attribute is derived from the S and D attributes as cap(S*D)/4 ms, where S is the
+//			number of symbols per codeword, D is the interleaving depth and cap() denotes rounding to the
+//			next higher integer. For [ITU-T G.993.2], this attribute is computed according to the formula in
+//			clause 9.7 of [ITUT G.993.2]. The actual interleaving delay is coded in milliseconds, rounded to
+//			the nearest millisecond. (R) (mandatory) (1 byte)
+//
+//		Actual Data Rate
+//			Actual data rate: This parameter reports the actual net data rate of the bearer channel,
+//			excluding the rate in the L1 and L2 states. In the L1 or L2 state, the parameter contains the
+//			net data rate in the previous L0 state. The data rate is coded in bits per second. (R)
+//			(mandatory) (4 bytes)
+//
+//		Previous Data Rate
+//			Previous data rate: This parameter reports the previous net data rate of the bearer channel just
+//			before the latest rate change event occurred, excluding transitions between the L0 state and the
+//			L1 or L2 states. A rate change can occur at a power management state transition, e.g., at full
+//			or short initialization, fast retrain or power down, or at a dynamic rate adaptation. The rate
+//			is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Actual Impulse Noise Protection
+//			Actual impulse noise protection: The ACTINP attribute reports the actual INP on the bearer
+//			channel in the L0 state. In the L1 or L2 state, the attribute contains the INP in the previous
+//			L0 state. The value of this attribute is a number of DMT symbols, with a granularity of 0.1
+//			symbols. Its range is from 0 (0.0 symbols) to 254 (25.4 symbols). The special value 255
+//			indicates an ACTINP higher than 25.4. (R) (optional for [ITU-T G.992.1], mandatory for other
+//			xDSL Recommendations that support this attribute) (1 byte)
+//
+//		Actual Size Of Reed_Solomon Codeword
+//			Actual size of Reed-Solomon codeword: The NFEC attribute reports the actual Reed-Solomon
+//			codeword size used in the latency path in which the bearer channel is transported. The value is
+//			coded in bytes, and ranges from 0..255. (R) (mandatory for ITUT G.993.2 VDSL2, optional for
+//			others) (1 byte)
+//
+//		Actual Number Of Reed_Solomon Redundancy Bytes
+//			Actual number of Reed-Solomon redundancy bytes: The RFEC attribute reports the actual number of
+//			Reed-Solomon redundancy bytes per codeword used in the latency path in which the bearer channel
+//			is transported. The value is coded in bytes, and ranges from 0..16. The value 0 indicates no
+//			Reed-Solomon coding. (R) (mandatory for ITUT G.993.2 VDSL2, optional for others) (1 byte)
+//
+//		Actual Number Of Bits Per Symbol
+//			Actual number of bits per symbol: The LSYMB attribute reports the actual number of bits per
+//			symbol assigned to the latency path in which the bearer channel is transported, excluding
+//			trellis overhead. The value is coded in bits, and ranges from 0..65535. (R) (mandatory for ITU-T
+//			G.993.2 VDSL2, optional for others) (2 bytes)
+//
+//		Actual Interleaving Depth
+//			Actual interleaving depth: The INTLVDEPTH attribute reports the actual depth of the interleaver
+//			used in the latency path in which the bearer channel is transported. The value ranges from
+//			1..4096 in steps of 1. The value 1 indicates no interleaving. (R) (mandatory for ITU-T G.993.2
+//			VDSL2, optional for others) (2 bytes)
+//
+//		Actual Interleaving Block Length
+//			Actual interleaving block length: The INTLVBLOCK attribute reports the actual block length of
+//			the interleaver used in the latency path in which the bearer channel is transported. The value
+//			ranges from 4..255 in steps of 1. (R) (mandatory for ITU-T G.993.2 VDSL2, undefined for others)
+//			(1 byte)
+//
+//		Actual Latency Path
+//			Actual latency path: The LPATH attribute reports the index of the actual latency path in which
+//			the bearer channel is transported. Valid values are 0..3. In [ITUT G.992.1], the fast path is
+//			mapped to latency index 0; the interleaved path to index 1. (R) (mandatory for ITU-T G.993.2
+//			VDSL2, optional for others) (1 byte)
+//
+//		Actual Impulse Noise Protection Against Repetitive Electrical Impulse Noise Actinp_Rein
+//			Actual impulse noise protection against repetitive electrical impulse noise (ACTINP_REIN): If
+//			retransmission is used in a given transmit direction, this parameter reports the actual INP
+//			against REIN on the bearer channel. The INP of this attribute is equal to the integer value
+//			multiplied by 0.1 symbols. Valid values and usage are given in clause 7.5.2.9 of [ITU-T G.997.1]
+//			(R) (optional) (1 byte)
+//
+type XdslChannelDownstreamStatusData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslchanneldownstreamstatusdataBME = &ManagedEntityDefinition{
+		Name:    "XdslChannelDownstreamStatusData",
+		ClassID: 102,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("ActualInterleavingDelay", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint32Field("ActualDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint32Field("PreviousDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  ByteField("ActualImpulseNoiseProtection", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  ByteField("ActualSizeOfReedSolomonCodeword", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  ByteField("ActualNumberOfReedSolomonRedundancyBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint16Field("ActualNumberOfBitsPerSymbol", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("ActualInterleavingDepth", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  ByteField("ActualInterleavingBlockLength", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: ByteField("ActualLatencyPath", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: ByteField("ActualImpulseNoiseProtectionAgainstREpetitiveELectricalIMpulseNOiseActinpRein", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+		},
+	}
+}
+
+// NewXdslChannelDownstreamStatusData (class ID 102) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewXdslChannelDownstreamStatusData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslchanneldownstreamstatusdataBME, params...)
+}
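The Actual Interleaving Delay description above derives the ADSL value as cap(S*D)/4 ms, with cap() rounding up to the next integer. A hypothetical helper (not part of the generated file) that applies that formula:

package main

import (
	"fmt"
	"math"
)

// adslInterleavingDelayMs returns the one-way interleaving delay in
// milliseconds, computed as cap(S*D)/4 where S is the number of symbols per
// codeword, D is the interleaving depth, and cap() rounds up to the next integer.
func adslInterleavingDelayMs(s, d float64) float64 {
	return math.Ceil(s*d) / 4
}

func main() {
	fmt.Println(adslInterleavingDelayMs(1, 64))  // 16 ms
	fmt.Println(adslInterleavingDelayMs(0.5, 8)) // 1 ms
}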
diff --git a/vendor/github.com/cboling/omci/generated/xdslchannelupstreamstatusdata.go b/vendor/github.com/cboling/omci/generated/xdslchannelupstreamstatusdata.go
new file mode 100644
index 0000000..47923d2
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslchannelupstreamstatusdata.go
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslChannelUpstreamStatusDataClassId ClassID = ClassID(103)
+
+var xdslchannelupstreamstatusdataBME *ManagedEntityDefinition
+
+// XdslChannelUpstreamStatusData (class ID #103)
+//	This ME contains upstream channel status data for an xDSL UNI. The ONU automatically creates or
+//	deletes instances of this ME upon the creation or deletion of a PPTP xDSL UNI part 1.
+//
+//	Relationships
+//		One or more instances of this ME are associated with an instance of an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Actual Interleaving Delay
+//			Actual interleaving delay: This attribute is the actual one-way interleaving delay introduced by
+//			the PMS-TC between the alpha and beta reference points, excluding the L1 and L2 states. In the
+//			L1 and L2 states, this attribute contains the interleaving delay in the previous L0 state. For
+//			ADSL, this attribute is derived from the S and D attributes as cap(S*D)/4 ms, where S is the
+//			number of symbols per codeword, D is the interleaving depth and cap() denotes rounding to the
+//			next higher integer. For [ITU-T G.993.2], this attribute is computed according to the formula in
+//			clause 9.7 of [ITUT G.993.2]. The actual interleaving delay is coded in milliseconds, rounded to
+//			the nearest millisecond. (R) (mandatory) (1 byte)
+//
+//		Actual Data Rate
+//			Actual data rate: This parameter reports the actual net data rate of the bearer channel,
+//			excluding the L1 and L2 states. In the L1 or L2 state, the parameter contains the net data rate
+//			in the previous L0 state. The data rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Previous Data Rate
+//			Previous data rate: This parameter reports the previous net data rate of the bearer channel just
+//			before the latest rate change event occurred, excluding transitions between the L0 state and the
+//			L1 or L2 state. A rate change can occur at a power management state transition, e.g., at full or
+//			short initialization, fast retrain or power down, or at a dynamic rate adaptation. The rate is
+//			coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Actual Impulse Noise Protection
+//			Actual impulse noise protection: The ACTINP attribute reports the actual INP on the bearer
+//			channel in the L0 state. In the L1 or L2 state, the attribute contains the INP in the previous
+//			L0 state. The value is coded in fractions of DMT symbols with a granularity of 0.1 symbols. The
+//			range is from 0 (0.0 symbols) to 254 (25.4 symbols). The special value 255 indicates an ACTINP
+//			higher than 25.4. (R) (mandatory for ITU-T G.993.2 VDSL2, optional for other xDSL
+//			Recommendations that support it) (1 byte)
+//
+//		Impulse Noise Protection Reporting Mode
+//			Impulse noise protection reporting mode: The INPREPORT attribute reports the method used to
+//			compute the ACTINP. If set to 0, the ACTINP is computed according to the INP_no_erasure formula
+//			(clause 9.6 of [ITUT G.993.2]). If set to 1, ACTINP is the value estimated by the xTU receiver.
+//			(R) (mandatory for ITU-T G.993.2 VDSL2) (1 byte)
+//
+//		Actual Size Of Reed_Solomon Codeword
+//			Actual size of Reed-Solomon codeword: The NFEC attribute reports the actual Reed-Solomon
+//			codeword size used in the latency path in which the bearer channel is transported. Its value is
+//			coded in bytes in the range 0..255. (R) (mandatory for ITU-T G.993.2 VDSL2, optional for others)
+//			(1 byte)
+//
+//		Actual Number Of Reed_Solomon Redundancy Bytes
+//			Actual number of Reed-Solomon redundancy bytes: The RFEC attribute reports the actual number of
+//			Reed-Solomon redundancy bytes per codeword used in the latency path in which the bearer channel
+//			is transported. Its value is coded in bytes in the range 0..16. The value 0 indicates no Reed-
+//			Solomon coding. (R) (mandatory for ITUT G.993.2 VDSL2, optional for others) (1 byte)
+//
+//		Actual Number Of Bits Per Symbol
+//			Actual number of bits per symbol: The LSYMB attribute reports the actual number of bits per
+//			symbol assigned to the latency path in which the bearer channel is transported, excluding
+//			trellis overhead. Its value is coded in bits in the range 0..65535. (R) (mandatory for
+//			ITUT G.993.2 VDSL2, optional for others) (2 bytes)
+//
+//		Actual Interleaving Depth
+//			Actual interleaving depth: The INTLVDEPTH attribute reports the actual depth of the interleaver
+//			used in the latency path in which the bearer channel is transported. Its value ranges from
+//			1..4096 in steps of 1. The value 1 indicates no interleaving. (R) (mandatory for ITU-T G.993.2
+//			VDSL2, optional for others) (2 bytes)
+//
+//		Actual Interleaving Block Length
+//			Actual interleaving block length: The INTLVBLOCK attribute reports the actual block length of
+//			the interleaver used in the latency path in which the bearer channel is transported. Its value
+//			ranges from 4..255 in steps of 1. (R) (mandatory for ITU-T G.993.2 VDSL2, optional for others)
+//			(1 byte)
+//
+//		Actual Latency Path
+//			Actual latency path: The LPATH attribute reports the index of the actual latency path in which
+//			the bearer channel is transported. Valid values are 0..3. In [ITUT G.992.1], the fast path is
+//			mapped to latency index 0; the interleaved path to index 1. (R) (mandatory for ITU-T G.993.2
+//			VDSL2, optional for others) (1 byte)
+//
+type XdslChannelUpstreamStatusData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslchannelupstreamstatusdataBME = &ManagedEntityDefinition{
+		Name:    "XdslChannelUpstreamStatusData",
+		ClassID: 103,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFFE0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  ByteField("ActualInterleavingDelay", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint32Field("ActualDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint32Field("PreviousDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  ByteField("ActualImpulseNoiseProtection", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  ByteField("ImpulseNoiseProtectionReportingMode", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  ByteField("ActualSizeOfReedSolomonCodeword", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  ByteField("ActualNumberOfReedSolomonRedundancyBytes", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("ActualNumberOfBitsPerSymbol", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint16Field("ActualInterleavingDepth", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: ByteField("ActualInterleavingBlockLength", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: ByteField("ActualLatencyPath", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+		},
+	}
+}
+
+// NewXdslChannelUpstreamStatusData (class ID 103) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewXdslChannelUpstreamStatusData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslchannelupstreamstatusdataBME, params...)
+}
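Several status attributes above share one fixed-point convention for INP: a single byte where 0..254 encode 0.0..25.4 DMT symbols in 0.1-symbol steps and 255 flags a value above 25.4. A small illustrative decoder (hypothetical, not part of this package):

package main

import "fmt"

// decodeActinp interprets the 1-byte ACTINP encoding described above:
// 0..254 encode 0.0..25.4 DMT symbols in steps of 0.1; 255 means "> 25.4".
func decodeActinp(v byte) (symbols float64, exceedsMax bool) {
	if v == 255 {
		return 25.4, true
	}
	return float64(v) / 10, false
}

func main() {
	fmt.Println(decodeActinp(47))  // 4.7 false
	fmt.Println(decodeActinp(255)) // 25.4 true
}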
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart2.go b/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart2.go
new file mode 100644
index 0000000..be4d045
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart2.go
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineConfigurationProfilePart2ClassId ClassID = ClassID(105)
+
+var xdsllineconfigurationprofilepart2BME *ManagedEntityDefinition
+
+// XdslLineConfigurationProfilePart2 (class ID #105)
+//	The overall xDSL line configuration profile is modelled in several parts, all of which are
+//	associated together through a common ME ID (the client PPTP xDSL UNI part 1 has a single
+//	pointer, which refers to the entire set of line configuration profile parts).
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. All xDSL and
+//			VDSL2 line configuration profiles and extensions that pertain to a given PPTP xDSL UNI must
+//			share a common ME ID. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Downstream Minimum Time Interval For Upshift Rate Adaptation
+//			Downstream minimum time interval for upshift rate adaptation: This parameter defines the
+//			interval during which the downstream noise margin must remain above the downstream upshift noise
+//			margin before the xTU-R attempts to increase the downstream net data rate. Its value ranges from
+//			0 to 16383 s. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Upstream Minimum Time Interval For Upshift Rate Adaptation
+//			Upstream minimum time interval for upshift rate adaptation: This parameter defines the interval
+//			during which the upstream noise margin must remain above the upstream upshift noise margin
+//			before the xTU-C attempts to increase the upstream net data rate. Its value ranges from 0 to
+//			16383 s. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Downstream Downshift Noise Margin
+//			Downstream downshift noise margin: If the downstream noise margin is below the downstream
+//			downshift noise margin and remains there for more than the downstream minimum time interval for
+//			downshift rate adaptation, the xTU-R attempts to decrease the downstream net data rate. This
+//			attribute's value ranges from 0 (0.0 dB) to 310 (31.0 dB). (R, W, setbycreate) (optional)
+//			(2 bytes)
+//
+//		Upstream Downshift Noise Margin
+//			Upstream downshift noise margin: If the upstream noise margin is below the upstream downshift
+//			noise margin and remains there for more than the upstream minimum time interval for downshift
+//			rate adaptation, the xTUC attempts to decrease the upstream net data rate. This attribute's
+//			value ranges from 0 (0.0 dB) to 310 (31.0 dB). (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Downstream Minimum Time Interval For Downshift Rate Adaptation
+//			Downstream minimum time interval for downshift rate adaptation: This parameter defines the
+//			interval during which the downstream noise margin must remain below the downstream downshift
+//			noise margin before the xTU-R attempts to decrease the downstream net data rate. Its value
+//			ranges from 0 to 16383 s. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Upstream Minimum Time Interval For Downshift Rate Adaptation
+//			Upstream minimum time interval for downshift rate adaptation: This parameter defines the
+//			interval during which the upstream noise margin must remain below the upstream downshift noise
+//			margin before the xTU-C attempts to decrease the upstream net data rate. Its value ranges from 0
+//			to 16383 s. (R, W, setbycreate) (optional) (2 bytes)
+//
+//		Xtu Impedance State Forced
+//			(R, W, setbycreate) (optional) (1 byte)
+//
+//		L0_Time
+//			L0-time:	This parameter specifies the minimum time between an exit from the L2 state and the
+//			next entry into the L2 state. It is only valid for [ITUT G.992.3], [ITUT G.992.4] and
+//			[ITUT G.992.5]. It ranges from 0 to 255 s. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		L2_Time
+//			L2-time:	This parameter specifies the minimum time between an entry into the L2 state and the
+//			first power trim in the L2 state, or between two consecutive power trims in the L2 state. It is
+//			only valid for [ITUT G.992.3], [ITUT G.992.4] and [ITUT G.992.5]. It ranges from 0 to 255 s. (R,
+//			W, setbycreate) (mandatory) (1 byte)
+//
+//		Downstream Maximum Nominal Power Spectral Density
+//			Downstream maximum nominal power spectral density: This attribute specifies the maximum nominal
+//			transmit PSD in the downstream direction during initialization and showtime. A single
+//			MAXNOMPSDds attribute is defined per mode enabled in the xTSE line configuration attribute. It
+//			is only valid for [ITUT G.992.3], [ITUT G.992.4] and [ITUT G.992.5]. Its value ranges from 0
+//			(–60.0 dBm/Hz) to 300 (–30 dBm/Hz). (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Upstream Maximum Nominal Power Spectral Density
+//			Upstream maximum nominal power spectral density: This attribute specifies the maximum nominal
+//			transmit PSD in the upstream direction during initialization and showtime. A single MAXNOMPSDus
+//			attribute is defined per mode enabled in the xTSE line configuration attribute. It is only valid
+//			for [ITUT G.992.3], [ITUT G.992.4] and [ITUT G.993.2]. Its value ranges from 0 (–60.0 dBm/Hz) to
+//			300 (–30 dBm/Hz). (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Downstream Maximum Nominal Aggregate Transmit Power
+//			Downstream maximum nominal aggregate transmit power: This attribute specifies the maximum
+//			nominal aggregate transmit power in the downstream direction during initialization and showtime.
+//			It is only valid for [ITUT G.992.3], [ITUT G.992.4], [ITUT G.992.5] and [ITUT G.993.2]. Its
+//			value ranges from 0 (0.0 dBm) to 255 (25.5 dBm). (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Upstream Maximum Nominal Aggregate Transmit Power
+//			Upstream maximum nominal aggregate transmit power: This parameter specifies the maximum nominal
+//			aggregate transmit power in the upstream direction during initialization and showtime. It is
+//			only valid for [ITUT G.992.3], [ITUT G.992.4] and [ITUT G.992.5]. Its value ranges from 0
+//			(0.0 dBm) to 255 (25.5 dBm). (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Upstream Maximum Aggregate_Receive Power
+//			Upstream maximum aggregate receive power: This parameter specifies the maximum upstream
+//			aggregate receive power over a set of subcarriers, as defined in the relevant Recommendation.
+//			The xTU-C requests an upstream power cutback such that the upstream aggregate receive power over
+//			that set of subcarriers is at or below the configured maximum value. It is only valid for
+//			[ITUT G.992.3], [ITUT G.992.4] and [ITUT G.992.5]. This attribute ranges from 0 (–25.5 dBm) to
+//			510 (+25.5 dBm). The special value 0xFFFF indicates that no upstream maximum aggregate receive
+//			power limit is to be applied. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Vdsl2 Transmission System Enabling
+//			VDSL2 transmission system enabling: This configuration attribute extends the transmission system
+//			coding types to be allowed by the xTU-C. It is a bit map, defined as octet 8 (bits 57..64) in
+//			Table 9.7.12-1. (R, W, setbycreate) (optional) (1 byte)
+//
+type XdslLineConfigurationProfilePart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineconfigurationprofilepart2BME = &ManagedEntityDefinition{
+		Name:    "XdslLineConfigurationProfilePart2",
+		ClassID: 105,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  Uint16Field("DownstreamMinimumTimeIntervalForUpshiftRateAdaptation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 1),
+			2:  Uint16Field("UpstreamMinimumTimeIntervalForUpshiftRateAdaptation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 2),
+			3:  Uint16Field("DownstreamDownshiftNoiseMargin", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 3),
+			4:  Uint16Field("UpstreamDownshiftNoiseMargin", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 4),
+			5:  Uint16Field("DownstreamMinimumTimeIntervalForDownshiftRateAdaptation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 5),
+			6:  Uint16Field("UpstreamMinimumTimeIntervalForDownshiftRateAdaptation", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 6),
+			7:  ByteField("XtuImpedanceStateForced", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 7),
+			8:  ByteField("L0Time", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 8),
+			9:  ByteField("L2Time", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 9),
+			10: Uint16Field("DownstreamMaximumNominalPowerSpectralDensity", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 10),
+			11: Uint16Field("UpstreamMaximumNominalPowerSpectralDensity", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 11),
+			12: ByteField("DownstreamMaximumNominalAggregateTransmitPower", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 12),
+			13: ByteField("UpstreamMaximumNominalAggregateTransmitPower", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 13),
+			14: Uint16Field("UpstreamMaximumAggregateReceivePower", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 14),
+			15: ByteField("Vdsl2TransmissionSystemEnabling", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewXdslLineConfigurationProfilePart2 (class ID 105) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from or is about to be sent on the wire.
+func NewXdslLineConfigurationProfilePart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineconfigurationprofilepart2BME, params...)
+}
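The profile attributes above encode dB-valued quantities as tenths: the downshift noise margins run 0..310 for 0.0..31.0 dB, and MAXNOMPSD runs 0..300 for –60.0..–30.0 dBm/Hz. A couple of hypothetical conversion helpers (not part of the generated code) make the scaling concrete:

package main

import "fmt"

// noiseMarginDb converts a downshift noise margin code (0..310) to dB.
func noiseMarginDb(code uint16) float64 { return float64(code) / 10 }

// maxNomPsdDbmHz converts a MAXNOMPSD code (0..300) to dBm/Hz, where 0 maps
// to -60.0 dBm/Hz and 300 maps to -30.0 dBm/Hz in 0.1 dB steps.
func maxNomPsdDbmHz(code uint16) float64 { return -60.0 + float64(code)/10 }

func main() {
	fmt.Println(noiseMarginDb(310))  // 31 dB
	fmt.Println(maxNomPsdDbmHz(300)) // -30 dBm/Hz
}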
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart3.go b/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart3.go
new file mode 100644
index 0000000..9623bed
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineconfigurationprofilepart3.go
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineConfigurationProfilePart3ClassId ClassID = ClassID(106)
+
+var xdsllineconfigurationprofilepart3BME *ManagedEntityDefinition
+
+// XdslLineConfigurationProfilePart3 (class ID #106)
+//	The overall xDSL line configuration profile is modelled in several parts, all of which are
+//	associated together through a common ME ID (the client PPTP xDSL UNI part 1 has a single
+//	pointer, which refers to the entire set of line configuration profile parts).
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. All xDSL and
+//			VDSL2 line configuration profiles and extensions that pertain to a given PPTP xDSL UNI must
+//			share a common ME ID. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Loop Diagnostics Mode Forced Ldsf
+//			Only while the line power management state is L3 can the line be forced into loop diagnostic
+//			mode. When loop diagnostic procedures complete successfully, the ONU resets this attribute to 0.
+//			The line remains in the L3 idle state. The loop diagnostics data are available at least until
+//			the line is forced to the L0 state. As long as loop diagnostic procedures have not completed
+//			successfully, attempts are made to do so, until the loop diagnostic mode is no longer forced on
+//			the line through this configuration parameter. If loop diagnostic procedures cannot be completed
+//			successfully after a vendordiscretionary number of retries or within a vendor-discretionary
+//			timeout, then an initialization failure occurs. (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Automode Cold Start Forced
+//			Automode is defined as the case where multiple operation modes are enabled in xTSE (Table
+//			9.7.12-1) and where the selection of the operation mode to be used for transmission depends, not
+//			only on the common capabilities of both xTUs (as exchanged in [ITU-T G.994.1]), but also on
+//			achievable data rates under given loop conditions. (R, W, setbycreate) (mandatory if automode is
+//			supported) (1 byte)
+//
+//		L2 Atpr
+//			L2ATPR:	This parameter specifies the maximum aggregate transmit power reduction that can be
+//			performed in the L2 request (i.e., at the transition of L0 to L2 state) or through a single
+//			power trim in the L2 state. It is only valid for [ITUT G.992.3], [ITUT G.992.4] and
+//			[ITUT G.992.5]. This attribute ranges from 0 (0 dB) to 31 (31 dB). (R, W, setbycreate)
+//			(mandatory) (1 byte)
+//
+//		L2 Atprt
+//			L2ATPRT:	This parameter specifies the total maximum aggregate transmit power reduction (in
+//			decibels) that can be performed in an L2 state. This is the sum of all reductions of L2 requests
+//			(i.e., at transitions from L0 to L2 state) and power trims. This attribute ranges from 0 (0 dB)
+//			to 31 (31 dB). (R, W, setbycreate) (mandatory) (1 byte)
+//
+//		Force Inp Downstream
+//			Force INP downstream: When set to 1, the FORCEINPds attribute forces the framer settings of all
+//			downstream bearer channels to be selected such that the impulse noise protection (INP) computed
+//			according to the formula specified in the relevant Recommendation is greater than or equal to
+//			the minimal INP requirement. The default value 0 disables this function. (R, W) (mandatory for
+//			[ITU-T G.993.2], optional for other Recommendations that support it) (1 byte)
+//
+//		Force Inp Upstream
+//			Force INP upstream: When set to 1, the FORCEINPus attribute forces the framer settings of all
+//			upstream bearer channels to be selected such that the INP computed according to the formula
+//			specified in the relevant Recommendation is greater than or equal to the minimal INP
+//			requirement. The default value 0 disables this function. (R, W) (mandatory for [ITU-T G.993.2],
+//			optional for other Recommendations that support it) (1 byte)
+//
+//		Update Request Flag For Near_End Test Parameters
+//			Update request flag for near-end test parameters: The UPDATE-TEST-NE attribute forces an update
+//			of all near-end test parameters that can be updated during showtime in [ITU-T G.993.2]. Update
+//			is triggered by setting this attribute to 1, whereupon the near-end test parameters are expected
+//			to be updated within 10 s, and the ONU should reset the attribute value to 0. The update request
+//			flag is independent of any autonomous update process in the system. The update request attribute
+//			must be prepared to accept another set after a period not to exceed 3 min, a period that starts
+//			when the flag is set via the OMCI or by an autonomous process in the system. (R, W) (optional)
+//			(1 byte)
+//
+//		Update Request Flag For Far_End Test Parameters
+//			Update request flag for far-end test parameters: The UPDATE-TEST-FE attribute forces an update
+//			of all far-end test parameters that can be updated during showtime in [ITU-T G.993.2]. Update is
+//			triggered by setting this attribute to 1, whereupon the far-end test parameters are expected to
+//			be updated within 10 s, and the ONU should reset the attribute value to 0. The update request
+//			flag is independent of any autonomous update process in the system. The update request attribute
+//			must be prepared to accept another set after a period not to exceed 3 min, a period that starts
+//			when the flag is set via the OMCI or by an autonomous process in the system. (R, W) (optional)
+//			(1 byte)
+//
+//		Inm Inter Arrival Time Offset Upstream
+//			INM inter-arrival time offset upstream: INMIATOus is the inter-arrival time (IAT) offset that
+//			the xTU-C receiver uses to determine in which bin of the IAT histogram the IAT is reported.
+//			Valid values for INMIATO range from 3 to 511 discrete multi-tone (DMT) symbols in steps of 1 DMT
+//			symbol. (R, W) (optional) (2 bytes)
+//
+//		Inm Inter_Arrival Time Step Upstream
+//			INM inter-arrival time step upstream: INMIATSus is the IAT step that the xTU-C receiver uses to
+//			determine in which bin of the IAT histogram the IAT is reported. Valid values for INMIATS range
+//			from 0 to 7 in steps of 1. (R, W) (optional) (1 byte)
+//
+//		Inm Cluster Continuation Value Upstream
+//			INM cluster continuation value upstream: INMCCus is the cluster continuation value that the
+//			xTU-C receiver uses in the cluster indication process described in the applicable
+//			Recommendation. Valid values for INMCC range from 0 to 64 DMT symbols in steps of 1 DMT symbol.
+//			(R, W) (optional) (1 byte)
+//
+//		Inm Equivalent Inp Mode Upstream
+//			INM equivalent INP mode upstream: INM_INPEQ_MODEus is the INM equivalent INP mode that the xTU-C
+//			receiver uses in the computation of the equivalent INP, as defined in the applicable
+//			Recommendation. Valid values for INM_INPEQ_MODE are 0..4. (R, W) (optional) (1 byte)
+//
+//		Inm Inter Arrival Time Offset Downstream
+//			INM inter-arrival time offset downstream: INMIATOds is the IAT offset that the xTU-R receiver
+//			uses to determine in which bin of the IAT histogram the IAT is reported. Valid values for
+//			INMIATO range from 3 to 511 DMT symbols in steps of 1 DMT symbol. (R, W) (optional) (2 bytes)
+//
+//		Inm Inter_Arrival Time Step Downstream
+//			INM inter-arrival time step downstream: INMIATSds is the IAT step that the xTU-R receiver uses
+//			to determine in which bin of the IAT histogram the IAT is reported. Valid values for INMIATS
+//			range from 0 to 7 in steps of 1. (R, W) (optional) (1 byte)
+//
+//		Inm Cluster Continuation Value Downstream
+//			INM cluster continuation value downstream: INMCCds is the cluster continuation value that the
+//			xTU-R receiver uses in the cluster indication process described in the applicable
+//			Recommendation. Valid values for INMCC range from 0 to 64 DMT symbols in steps of 1 DMT symbol.
+//			(R, W) (optional) (1 byte)
+//
+//		Inm Equivalent Inp Mode Downstream
+//			INM equivalent INP mode downstream: INM_INPEQ_MODEds is the INM equivalent INP mode that the
+//			xTU-R receiver uses in the computation of the equivalent INP, as defined in the applicable
+//			Recommendation. Valid values for INM_INPEQ_MODE are 0..4. (R, W) (optional) (1 byte)
+//
+type XdslLineConfigurationProfilePart3 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineconfigurationprofilepart3BME = &ManagedEntityDefinition{
+		Name:    "XdslLineConfigurationProfilePart3",
+		ClassID: 106,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("LoopDiagnosticsModeForcedLdsf", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2:  ByteField("AutomodeColdStartForced", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  ByteField("L2Atpr", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 3),
+			4:  ByteField("L2Atprt", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 4),
+			5:  ByteField("ForceInpDownstream", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+			6:  ByteField("ForceInpUpstream", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 6),
+			7:  ByteField("UpdateRequestFlagForNearEndTestParameters", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 7),
+			8:  ByteField("UpdateRequestFlagForFarEndTestParameters", 0, mapset.NewSetWith(Read, Write), true, false, true, false, 8),
+			9:  Uint16Field("InmInterArrivalTimeOffsetUpstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 9),
+			10: ByteField("InmInterArrivalTimeStepUpstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 10),
+			11: ByteField("InmClusterContinuationValueUpstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 11),
+			12: ByteField("InmEquivalentInpModeUpstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 12),
+			13: Uint16Field("InmInterArrivalTimeOffsetDownstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 13),
+			14: ByteField("InmInterArrivalTimeStepDownstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 14),
+			15: ByteField("InmClusterContinuationValueDownstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 15),
+			16: ByteField("InmEquivalentInpModeDownstream", 0, mapset.NewSetWith(Read, Write), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewXdslLineConfigurationProfilePart3 (class ID 106) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslLineConfigurationProfilePart3(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineconfigurationprofilepart3BME, params...)
+}
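As a usage sketch only: the constructor above accepts optional ParamData to seed the new instance. The snippet below assumes ParamData carries EntityID and Attributes fields, as it appears to elsewhere in this vendored package; the constructor signature and its (*ManagedEntity, OmciErrors) return come from the generated file itself, while the OmciErrors accessors are not shown in this diff and are left uninspected here.

package main

import "github.com/cboling/omci/generated"

func main() {
	// Assumed ParamData shape (EntityID/Attributes); the constructor itself
	// is taken from the generated file above.
	me, omciErr := generated.NewXdslLineConfigurationProfilePart3(
		generated.ParamData{EntityID: 1},
	)
	_ = me      // ManagedEntity validated against the class ID 106 definition
	_ = omciErr // inspect with the package's OmciErrors helpers as appropriate
}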
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart1.go b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart1.go
new file mode 100644
index 0000000..46346fc
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart1.go
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineInventoryAndStatusDataPart1ClassId ClassID = ClassID(100)
+
+var xdsllineinventoryandstatusdatapart1BME *ManagedEntityDefinition
+
+// XdslLineInventoryAndStatusDataPart1 (class ID #100)
+//	This ME contains part 1 of the line inventory and status data for an xDSL UNI. The ONU
+//	automatically creates or deletes an instance of this ME upon the creation or deletion of a PPTP
+//	xDSL UNI part 1.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Xtu_C G.994.1 Vendor Id
+//			xTU-C G.994.1 vendor ID: This is the vendor ID as inserted by the xTU-C in the ITUT G.994.1 CL
+//			message. It comprises 8 octets, including a country code followed by a (regionally allocated)
+//			provider code, as defined in [ITUT T.35]. (R) (mandatory) (8 bytes)
+//
+//		Xtu_R G.994.1 Vendor Id
+//			xTU-R G.994.1 vendor ID: This is the vendor ID as inserted by the xTU-R in the ITUT G.994.1 CLR
+//			message. It comprises 8 binary octets, with the same format as the xTUC ITUT G.994.1 vendor ID.
+//			(R) (mandatory) (8 bytes)
+//
+//		Xtu_C System Vendor Id
+//			xTU-C system vendor ID: This is the vendor ID as inserted by the xTU-C in the overhead messages
+//			of [ITU-T G.992.3] and [ITU-T G.992.4]. It comprises 8 binary octets, with the same format as
+//			the xTU-C ITUT G.994.1 vendor ID. (R) (mandatory) (8 bytes)
+//
+//		Xtu_R System Vendor Id
+//			xTU-R system vendor ID: This is the vendor ID as inserted by the xTU-R in the embedded
+//			operations channel and overhead messages of [ITU-T G.992.3] and [ITUT G.992.4]. It comprises 8
+//			binary octets, with the same format as the xTU-C ITUT G.994.1 vendor ID. (R) (mandatory)
+//			(8 bytes)
+//
+//		Xtu_C Version Number
+//			xTU-C version number: This is the vendorspecific version number as inserted by the xTUC in the
+//			overhead messages of [ITU-T G.992.3] and [ITU-T G.992.4]. It comprises up to 16 binary octets.
+//			(R) (mandatory) (16 bytes)
+//
+//		Xtu_R Version Number
+//			xTU-R version number: This is the version number as inserted by the xTUR in the embedded
+//			operations channel of [ITU-T G.992.1] or [ITU-T G.992.2], or the overhead messages of [ITU-T
+//			G.992.3], [ITU-T G.992.4], [ITU-T G.992.5] and [ITU-T G.993.2]. The attribute value may be
+//			vendor-specific, but is recommended to comprise up to 16 ASCII characters, null-terminated if it
+//			is shorter than 16. The string should contain the xTU-R firmware version and the xTU-R model,
+//			encoded in that order and separated by a space character: "<xTU-R firmware version><xTU-R
+//			model>". It is recognized that legacy xTU-Rs may not support this format. (R) (mandatory)
+//			(16 bytes)
+//
+//		Xtu_C Serial Number Part 1
+//			xTU-C serial number part 1: The vendorspecific serial number inserted by the xTU-C in the
+//			overhead messages of [ITU-T G.992.3] and [ITU-T G.992.4] comprises up to 32 ASCII characters,
+//			null terminated if it is shorter than 32 characters. This attribute contains the first 16
+//			characters. (R) (mandatory) (16 bytes)
+//
+//		Xtu_C Serial Number Part 2
+//			xTU-C serial number part 2: This attribute contains the second 16 characters of the xTU-C serial
+//			number. (R) (mandatory) (16 bytes)
+//
+//		Xtu_R Serial Number Part 1
+//			xTU-R serial number part 1: The serial number inserted by the xTU-R in the embedded operations
+//			channel of [ITU-T G.992.1] or [ITU-T G.992.2], or the overhead messages of [ITU-T G.992.3],
+//			[ITU-T G.992.4], [ITU-T G.992.5] and [ITUT G.993.2], comprises up to 32 ASCII characters,
+//			nullterminated if it is shorter than 32. It is recommended that the equipment serial number, the
+//			equipment model and the equipment firmware version, encoded in that order and separated by space
+//			characters, be contained: "<equipment serial number><equipment model><equipment firmware
+//			version>". It is recognized that legacy xTU-Rs may not support this format. This attribute
+//			contains the first 16 characters. (R) (mandatory) (16 bytes)
+//
+//		Xtu_R Serial Number Part 2
+//			xTU-R serial number part 2: This attribute contains the second 16 characters of the xTU-R serial
+//			number. (R) (mandatory) (16 bytes)
+//
+//		Xtu_C Self Test Results
+//			xTU-C selftest results: This parameter reports the xTU-C self-test result. It is coded in two
+//			fields. The most significant octet is 0 if the self-test passed and 1 if it failed. The three
+//			least significant octets are a vendor-discretionary integer that can be interpreted in
+//			combination with [ITU-T G.994.1] and the system vendor ID. (R) (mandatory) (4 bytes)
+//
+//		Xtu_R Self Test Results
+//			xTU-R selftest results: This parameter defines the xTU-R self-test result. It is coded in two
+//			fields. The most significant octet is 0 if the self-test passed and 1 if it failed. The three
+//			least significant octets are a vendor-discretionary integer that can be interpreted in
+//			combination with [ITU-T G.994.1] and the system vendor ID. (R) (mandatory) (4 bytes)
+//
+//		Xtu_C Transmission System Capability
+//			NOTE 1 – This attribute is only 7 bytes long. An eighth byte identifying VDSL2 capabilities is
+//			defined in the VDSL2 line inventory and status data part 1 ME.
+//
+//		Xtu_R Transmission System Capability
+//			NOTE 2 – This attribute is only 7 bytes long. An eighth byte identifying VDSL2 capabilities is
+//			defined in the VDSL2 line inventory and status data part 2 ME.
+//
+//		Initialization Success_Failure Cause
+//			(R) (mandatory) (1 byte)
+//
+type XdslLineInventoryAndStatusDataPart1 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineinventoryandstatusdatapart1BME = &ManagedEntityDefinition{
+		Name:    "XdslLineInventoryAndStatusDataPart1",
+		ClassID: 100,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  Uint64Field("XtuCG9941VendorId", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint64Field("XtuRG9941VendorId", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint64Field("XtuCSystemVendorId", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint64Field("XtuRSystemVendorId", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  MultiByteField("XtuCVersionNumber", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  MultiByteField("XtuRVersionNumber", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  MultiByteField("XtuCSerialNumberPart1", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  MultiByteField("XtuCSerialNumberPart2", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  MultiByteField("XtuRSerialNumberPart1", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: MultiByteField("XtuRSerialNumberPart2", 16, nil, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint32Field("XtuCSelfTestResults", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint32Field("XtuRSelfTestResults", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: MultiByteField("XtuCTransmissionSystemCapability", 7, nil, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: MultiByteField("XtuRTransmissionSystemCapability", 7, nil, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: ByteField("InitializationSuccessFailureCause", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+		},
+	}
+}
+
+// NewXdslLineInventoryAndStatusDataPart1 (class ID 100) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslLineInventoryAndStatusDataPart1(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineinventoryandstatusdatapart1BME, params...)
+}
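The AllowedAttributeMask values in these definitions follow the usual OMCI convention of one mask bit per attribute, with the most significant of the 16 bits standing for attribute 1. A short sketch reproducing the 0xFFFE mask of the 15-attribute definition above:

package main

import "fmt"

func main() {
	// One mask bit per defined attribute; the most significant bit
	// corresponds to attribute 1, so 15 attributes yield 0xFFFE.
	var mask uint16
	for attr := 1; attr <= 15; attr++ {
		mask |= 1 << uint(16-attr)
	}
	fmt.Printf("AllowedAttributeMask = 0x%04X\n", mask) // 0xFFFE
}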
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart2.go b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart2.go
new file mode 100644
index 0000000..6da6e33
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart2.go
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineInventoryAndStatusDataPart2ClassId ClassID = ClassID(101)
+
+var xdsllineinventoryandstatusdatapart2BME *ManagedEntityDefinition
+
+// XdslLineInventoryAndStatusDataPart2 (class ID #101)
+//	This ME contains part 2 of the line inventory and status data for an xDSL UNI. The ONU
+//	automatically creates or deletes an instance of this ME upon the creation or deletion of a PPTP
+//	xDSL UNI part 1.
+//
+//	NOTE 1 – [ITU-T G.997.1] specifies that bit rate attributes have granularity of 1000 bit/s. If
+//	ITUT G.997.1 compliance is required, the ONU should only report values with this granularity.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R)
+//			(mandatory) (2 bytes)
+//
+//		Xdsl Transmission System
+//			NOTE 2 – This attribute is only 7 bytes long. An eighth byte identifying VDSL2 capabilities in
+//			use is defined in the VDSL2 line inventory and status data part 1 ME.
+//
+//		Line Power Management State
+//			(R) (mandatory) (1 byte)
+//
+//		Downstream Line Attenuation
+//			NOTE 3 – [ITU-T G.993.2] specifies a per-band array to represent this attribute. The array is
+//			defined in the VDSL2 line inventory and status data part 3 ME. In an ITU-T G.993.2 context, the
+//			downstream line attenuation attribute should be set to 0 here, and populated in the VDSL2 line
+//			inventory and status data part 3 ME instead.
+//
+//		Upstream Line Attenuation
+//			NOTE 4 – [ITU-T G.993.2] specifies a per-band array to represent this attribute. The array is
+//			defined in the VDSL2 line inventory and status data part 3 ME. In an ITU-T G.993.2 context, the
+//			upstream line attenuation attribute should be set to 0 here, and populated in the VDSL2 line
+//			inventory and status data part 3 ME instead.
+//
+//		Downstream Signal Attenuation
+//			NOTE 6 – [ITU-T G.993.2] specifies a per-band array to represent this attribute. The array is
+//			defined in the VDSL2 line inventory and status data part 3 ME. In an ITU-T G.993.2 context, the
+//			downstream signal attenuation attribute should be set to 0 here, and populated in the VDSL2 line
+//			inventory and status data part 3 ME instead.
+//
+//		Upstream Signal Attenuation
+//			NOTE 8 – [ITU-T G.993.2] specifies a per-band array to represent this attribute. The array is
+//			defined in the VDSL2 line inventory and status data part 3 ME. In an ITU-T G.993.2 context, the
+//			upstream signal attenuation attribute should be set to 0 here, and populated in the VDSL2 line
+//			inventory and status data part 3 ME instead.
+//
+//		Downstream Snr Ratio Margin
+//			Downstream SNR ratio margin: The downstream SNR margin SNRMds is the maximum increase of noise
+//			power received at the xTUR, such that the BER requirements can still be met for all downstream
+//			bearer channels. The attribute value ranges from 0 (–64.0 dB) to 1270 (+63.0 dB). The special
+//			value 0xFFFF indicates that the attribute is out of range. (R) (mandatory) (2 bytes)
+//
+//		Upstream Snr Margin
+//			Upstream SNR margin: The upstream SNR margin SNRMus is the maximum increase of noise power
+//			received at the xTUC, such that the BER requirements can still be met for all upstream bearer
+//			channels. The attribute value ranges from 0 (–64.0 dB) to 1270 (+63.0 dB). The special value
+//			0xFFFF indicates that the attribute is out of range. (R) (mandatory) (2 bytes)
+//
+//		Downstream Maximum Attainable Data Rate
+//			Downstream maximum attainable data rate: The ATTNDRds attribute indicates the maximum downstream
+//			net data rate currently attainable. The rate is coded in bits per second. (R) (mandatory)
+//			(4 bytes)
+//
+//		Upstream Maximum Attainable Data Rate
+//			Upstream maximum attainable data rate: The ATTNDRus attribute indicates the maximum upstream net
+//			data rate currently attainable. The rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Downstream Actual Power Spectrum Density
+//			Downstream actual power spectrum density: The ACTPSDds attribute is the average downstream
+//			transmit power spectrum density over the subcarriers in use (subcarriers to which downstream
+//			user data are allocated) delivered by the xTUC at the UC reference point, at the instant of
+//			measurement. The attribute value ranges from 0 (–90.0 dBm/Hz) to 900 (0.0 dBm/Hz). The special
+//			value (0xFFFF) indicates that the parameter is out of range. (R) (mandatory) (2 bytes)
+//
+//		Upstream Actual Power Spectrum Density
+//			Upstream actual power spectrum density: The ACTPSDus attribute is the average upstream transmit
+//			power spectrum density over the subcarriers in use (subcarriers to which upstream user data are
+//			allocated) delivered by the xTUR at the UR reference point, at the instant of measurement. The
+//			attribute value ranges from 0 (–90.0 dBm/Hz) to 900 (0.0 dBm/Hz). The special value 0xFFFF
+//			indicates that the attribute is out of range. (R) (mandatory) (2 bytes)
+//
+//		Downstream Actual Aggregate Transmit Power
+//			NOTE 9 – The downstream nominal aggregate transmit power may be taken as a best estimate of the
+//			parameter.
+//
+//		Upstream Actual Aggregate Transmit Power
+//			NOTE 10 – The upstream nominal aggregate transmit power may be taken as a best estimate of the
+//			parameter.
+//
+//		Initialization _ Last State Transmitted Downstream
+//			(R) (mandatory) (1 byte)
+//
+//		Initialization _ Last State Transmitted Upstream
+//			(R) (mandatory) (1 byte)
+//
+type XdslLineInventoryAndStatusDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineinventoryandstatusdatapart2BME = &ManagedEntityDefinition{
+		Name:    "XdslLineInventoryAndStatusDataPart2",
+		ClassID: 101,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  MultiByteField("XdslTransmissionSystem", 7, nil, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  ByteField("LinePowerManagementState", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint16Field("DownstreamLineAttenuation", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint16Field("UpstreamLineAttenuation", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint16Field("DownstreamSignalAttenuation", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint16Field("UpstreamSignalAttenuation", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint16Field("DownstreamSnrRatioMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("UpstreamSnrMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint32Field("DownstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("UpstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint16Field("DownstreamActualPowerSpectrumDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint16Field("UpstreamActualPowerSpectrumDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint16Field("DownstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint16Field("UpstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: ByteField("InitializationLastStateTransmittedDownstream", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: ByteField("InitializationLastStateTransmittedUpstream", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewXdslLineInventoryAndStatusDataPart2 (class ID 101) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslLineInventoryAndStatusDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineinventoryandstatusdatapart2BME, params...)
+}
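The SNR margin and PSD attributes above are reported as scaled integers: 0.1 dB steps offset from -64.0 dB for the SNR margins, 0.1 dBm/Hz steps offset from -90.0 dBm/Hz for ACTPSD, with 0xFFFF marking an out-of-range value. A minimal decoding sketch implied by those descriptions:

package main

import "fmt"

// snrMarginDB decodes SNRMds/SNRMus: 0 => -64.0 dB ... 1270 => +63.0 dB.
func snrMarginDB(raw uint16) (float64, bool) {
	if raw == 0xFFFF {
		return 0, false // out of range
	}
	return float64(raw)/10.0 - 64.0, true
}

// actPSDdBmHz decodes ACTPSDds/ACTPSDus: 0 => -90.0 dBm/Hz ... 900 => 0.0 dBm/Hz.
func actPSDdBmHz(raw uint16) (float64, bool) {
	if raw == 0xFFFF {
		return 0, false // out of range
	}
	return float64(raw)/10.0 - 90.0, true
}

func main() {
	if v, ok := snrMarginDB(700); ok {
		fmt.Printf("SNRMds = %.1f dB\n", v) // 6.0 dB
	}
	if v, ok := actPSDdBmHz(500); ok {
		fmt.Printf("ACTPSDds = %.1f dBm/Hz\n", v) // -40.0 dBm/Hz
	}
}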
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart5.go b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart5.go
new file mode 100644
index 0000000..0d02d68
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart5.go
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineInventoryAndStatusDataPart5ClassId ClassID = ClassID(325)
+
+var xdsllineinventoryandstatusdatapart5BME *ManagedEntityDefinition
+
+// XdslLineInventoryAndStatusDataPart5 (class ID #325)
+//	This ME extends the attributes defined in the xDSL line inventory and status data parts 1..4.
+//	This ME reports FEXT and NEXT attributes, and pertains to Annex C of [ITUT G.992.3] (ADSL2) and
+//	Annex C of [ITUT G.992.5] (ADSL2plus).
+//
+//	Relationships
+//		This is one of the status data MEs associated with an xDSL UNI. The ONU automatically creates or
+//		deletes an instance of this ME upon creation or deletion of a PPTP xDSL UNI part 1 that supports
+//		these attributes.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1 ME. (R)
+//			(mandatory) (2 bytes)
+//
+//		Fext Downstream Snr Margin
+//			FEXT downstream SNR margin: The FEXT SNRMds attribute is the downstream SNR margin measured
+//			during FEXTR duration at the ATU-R. The attribute value ranges from 0 (–64.0 dB) to 1270
+//			(+63.0 dB). The special value 0xFFFF indicates that the attribute is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Next Downstream Snr Margin
+//			NEXT downstream SNR margin: The NEXT SNRMds attribute is the downstream SNR margin measured
+//			during NEXTR duration at the ATU-R. The attribute value ranges from 0 (–64.0 dB) to 1270
+//			(+63.0 dB). The special value 0xFFFF indicates that the attribute is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Fext Upstream Snr Margin
+//			FEXT upstream SNR margin: The FEXT SNRMus attribute is the upstream SNR margin (see clause
+//			7.5.1.16 of [ITUT G.997.1]) measured during FEXTC duration at the ATU-C. The attribute value
+//			ranges from 0 (–64.0 dB) to 1270 (+63.0 dB). The special value 0xFFFF indicates that the
+//			attribute is out of range. (R) (mandatory) (2 bytes)
+//
+//		Next Upstream Snr Margin
+//			NEXT upstream SNR margin: The NEXT SNRMus attribute is the upstream SNR margin (see clause
+//			7.5.1.16 of [ITUT G.997.1]) measured during NEXTC duration at the ATU-C. The attribute value
+//			ranges from 0 (–64.0 dB) to 1270 (+63.0 dB). The special value 0xFFFF indicates that the
+//			attribute is out of range. (R) (mandatory) (2 bytes)
+//
+//		Fext Downstream Maximum Attainable Data Rate
+//			FEXT downstream maximum attainable data rate: The FEXT ATTNDRds attribute is the maximum
+//			downstream net data rate calculated from FEXT downstream SNR(f) (see clause 7.5.1.28.3.1 of
+//			[ITUT G.997.1]). The rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Next Downstream Maximum Attainable Data Rate
+//			NEXT downstream maximum attainable data rate: The NEXT ATTNDRds attribute is the maximum
+//			downstream net data rate calculated from NEXT downstream SNR(f) (see clause 7.5.1.28.3.2 of
+//			[ITUT G.997.1]). The rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Fext Upstream Maximum Attainable Data Rate
+//			FEXT upstream maximum attainable data rate: The FEXT ATTNDRus attribute is the maximum upstream
+//			net data rate calculated from FEXT upstream SNR(f) (see clause 7.5.1.28.6.1 of [ITUT G.997.1]).
+//			The rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Next Upstream Maximum Attainable Data Rate
+//			NEXT upstream maximum attainable data rate: The NEXT ATTNDRus attribute is the maximum upstream
+//			net data rate calculated from NEXT upstream SNR(f) (see clause 7.5.1.28.6.2 of [ITUT G.997.1]).
+//			The rate is coded in bits per second. (R) (mandatory) (4 bytes)
+//
+//		Fext Downstream Actual Power Spectral Density
+//			FEXT downstream actual power spectral density: The FEXT ACTPSDds attribute is the average
+//			downstream transmit PSD over the used subcarriers (see clause 7.5.1.21.1 of [ITUT G.997.1])
+//			calculated from the REFPSDds and RMSGIds for FEXTR duration. The attribute value ranges from 0
+//			(–90.0 dBm/Hz) to 900 (0.0 dBm/Hz). The special value 0xFFFF indicates that the parameter is out
+//			of range. (R) (mandatory) (2 bytes)
+//
+//		Next Downstream Actual Power Spectral Density
+//			NEXT downstream actual power spectral density: The NEXT ACTPSDds attribute is the average
+//			downstream transmit PSD over the used subcarriers (see clause 7.5.1.21.2 of [ITUT G.997.1])
+//			calculated from the REFPSDds and RMSGIds for NEXTR duration. The attribute value ranges from 0
+//			(–90.0 dBm/Hz) to 900 (0.0 dBm/Hz). The special value 0xFFFF indicates that the parameter is out
+//			of range. (R) (mandatory) (2 bytes)
+//
+//		Fext Upstream Actual Power Spectral Density
+//			FEXT upstream actual power spectral density: The FEXT ACTPSDus attribute is the average upstream
+//			transmit PSD over the used subcarriers (see clause 7.5.1.22.1 of [ITUT G.997.1]) calculated from
+//			the REFPSDus and RMSGIus for FEXTC duration. The attribute value ranges from 0 (–90.0 dBm/Hz) to
+//			900 (0.0 dBm/Hz). The special value 0xFFFF indicates that the parameter is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Next Upstream Actual Power Spectral Density
+//			NEXT upstream actual power spectral density: The NEXT ACTPSDus attribute is the average upstream
+//			transmit PSD over the used subcarriers (see clause 7.5.1.22.2 of [ITUT G.997.1]) calculated from
+//			the REFPSDus and RMSGIus for NEXTC duration. The attribute value ranges from 0 (–90.0 dBm/Hz) to
+//			900 (0.0 dBm/Hz). The special value 0xFFFF indicates that the parameter is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Fext Downstream Actual Aggregate Transmit Power
+//			FEXT downstream actual aggregate transmit power: The FEXT ACTATPds attribute is the total amount
+//			of transmit power (see clause 7.5.1.24.1 of [ITUT G.997.1]) calculated from PSDds measured
+//			during FEXTR duration at the ATU-R. The attribute value ranges from 0 (–31.0 dBm) to 620
+//			(+31.0 dBm). The special value 0xFFFF indicates that the parameter is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Next Downstream Actual Aggregate Transmit Power
+//			NEXT downstream actual aggregate transmit power: The NEXT ACTATPds attribute is the total amount
+//			of transmit power (see clause 7.5.1.24.2 of [ITUT G.997.1]) calculated from PSDds measured
+//			during NEXTR duration at the ATU-R. The attribute value ranges from 0 (–31.0 dBm) to 620
+//			(+31.0 dBm). The special value 0xFFFF indicates that the parameter is out of range. (R)
+//			(mandatory) (2 bytes)
+//
+//		Fext Upstream Actual Aggregate Transmit Power
+//			FEXT upstream actual aggregate transmit power: The FEXT ACTATPus attribute is the total transmit
+//			power (see clause 7.5.1.25.1 of [ITUT G.997.1]) calculated from PSDus measured during FEXTC
+//			duration at the ATU-C. The attribute value ranges from 0 (–31.0 dBm) to 620 (+31.0 dBm). The
+//			special value 0xFFFF indicates that the parameter is out of range. (R) (mandatory) (2 bytes)
+//
+//		Next Upstream Actual Aggregate Transmit Power
+//			NEXT upstream actual aggregate transmit power: The NEXT ACTATPus attribute is the total transmit
+//			power (see clause 7.5.1.25.2 of [ITUT G.997.1]) calculated from PSDus measured during NEXTC
+//			duration at the ATU-C. The attribute value ranges from 0 (–31.0 dBm) to 620 (+31.0 dBm). The
+//			special value 0xFFFF indicates that the parameter is out of range. (R) (mandatory) (2 bytes)
+//
+type XdslLineInventoryAndStatusDataPart5 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineinventoryandstatusdatapart5BME = &ManagedEntityDefinition{
+		Name:    "XdslLineInventoryAndStatusDataPart5",
+		ClassID: 325,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1:  Uint16Field("FextDownstreamSnrMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("NextDownstreamSnrMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3:  Uint16Field("FextUpstreamSnrMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint16Field("NextUpstreamSnrMargin", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint32Field("FextDownstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint32Field("NextDownstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("FextUpstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint32Field("NextUpstreamMaximumAttainableDataRate", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint16Field("FextDownstreamActualPowerSpectralDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint16Field("NextDownstreamActualPowerSpectralDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint16Field("FextUpstreamActualPowerSpectralDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint16Field("NextUpstreamActualPowerSpectralDensity", 0, mapset.NewSetWith(Read), false, false, false, false, 12),
+			13: Uint16Field("FextDownstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint16Field("NextDownstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint16Field("FextUpstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 15),
+			16: Uint16Field("NextUpstreamActualAggregateTransmitPower", 0, mapset.NewSetWith(Read), false, false, false, false, 16),
+		},
+	}
+}
+
+// NewXdslLineInventoryAndStatusDataPart5 (class ID 325) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslLineInventoryAndStatusDataPart5(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineinventoryandstatusdatapart5BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart8.go b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart8.go
new file mode 100644
index 0000000..2f2966b
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdsllineinventoryandstatusdatapart8.go
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslLineInventoryAndStatusDataPart8ClassId ClassID = ClassID(414)
+
+var xdsllineinventoryandstatusdatapart8BME *ManagedEntityDefinition
+
+// XdslLineInventoryAndStatusDataPart8 (class ID #414)
+//	This ME extends the attributes defined in the xDSL line inventory and status data parts 1..4.
+//
+//	Relationships
+//		This is one of the status data MEs associated with an xDSL UNI. The ONU automatically creates or
+//		deletes an instance of this ME upon creation or deletion of a PPTP xDSL UNI part 1 that supports
+//		these attributes.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1 ME. (R)
+//			(mandatory) (2 bytes)
+//
+//		Retransmission Used Downstream Rtx_Usedds
+//			Retransmission used downstream (RTX_USEDds): This parameter specifies whether [ITU-T G.998.4]
+//			retransmission is used (i.e., active in showtime) in the downstream transmit direction. The
+//			valid range of values is given in clause 7.5.1.38 of [ITU-T G.997.1]. (R) (mandatory) (1 byte)
+//
+//		Retransmission Used Upstream Rtx_Usedus
+//			Retransmission used upstream (RTX_USEDus): This parameter specifies whether [ITUT G.998.4]
+//			retransmission is used (i.e., active in showtime) in the upstream transmit direction. The valid
+//			range of values is given in clause 7.5.1.38 of [ITU-T G.997.1]. (R) (mandatory) (1 byte)
+//
+//		Date_Time_Stamping Of Near_End Test Parameters Stamp_Test_Ne
+//			(R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Far_End Test Parameters Stamp_Test_Fe
+//			Date/time-stamping of far-end test parameters (STAMP-TEST-FE): This parameter indicates the
+//			date/time when the far-end test parameters that can change during showtime were last updated.
+//			See clause 7.5.1.36.4 of [ITUT G.997.1]. The format of this parameter is the same as STAMP-TEST-
+//			NE. (R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Downstream Olr Operation Stamp_Olr_Ds
+//			Date/time-stamping of last successful downstream OLR operation (STAMP-OLR-ds): This parameter
+//			indicates the date/time of the last successful OLR execution in the downstream direction that
+//			has modified the bits or gains. See clause 7.5.1.37.1 of [ITU-T G.997.1]. The format of this
+//			parameter is the same as STAMP-TEST-NE. (R) (optional) (7 bytes)
+//
+//		Date_Time_Stamping Of Last Successful Upstream Olr Operation Stamp_Olr_Us
+//			Date/time-stamping of last successful upstream OLR operation (STAMP-OLR-us): This parameter
+//			indicates the date/time of the last successful OLR execution in the upstream direction that has
+//			modified the bits or gains. See clause 7.5.1.37.2 of [ITU-T G.997.1]. The format of this
+//			parameter is the same as STAMP-TEST-NE. (R) (optional) (7 bytes)
+//
+type XdslLineInventoryAndStatusDataPart8 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdsllineinventoryandstatusdatapart8BME = &ManagedEntityDefinition{
+		Name:    "XdslLineInventoryAndStatusDataPart8",
+		ClassID: 414,
+		MessageTypes: mapset.NewSetWith(
+			Get,
+		),
+		AllowedAttributeMask: 0XFC00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read), false, false, false, false, 0),
+			1: ByteField("RetransmissionUsedDownstreamRtxUsedds", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: ByteField("RetransmissionUsedUpstreamRtxUsedus", 0, mapset.NewSetWith(Read), false, false, false, false, 2),
+			3: MultiByteField("DateTimeStampingOfNearEndTestParametersStampTestNe", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4: MultiByteField("DateTimeStampingOfFarEndTestParametersStampTestFe", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: MultiByteField("DateTimeStampingOfLastSuccessfulDownstreamOlrOperationStampOlrDs", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6: MultiByteField("DateTimeStampingOfLastSuccessfulUpstreamOlrOperationStampOlrUs", 7, nil, mapset.NewSetWith(Read), false, false, true, false, 6),
+		},
+	}
+}
+
+// NewXdslLineInventoryAndStatusDataPart8 (class ID 414) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslLineInventoryAndStatusDataPart8(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdsllineinventoryandstatusdatapart8BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingdownstreamprofile.go b/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingdownstreamprofile.go
new file mode 100644
index 0000000..decef59
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingdownstreamprofile.go
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslSubcarrierMaskingDownstreamProfileClassId ClassID = ClassID(108)
+
+var xdslsubcarriermaskingdownstreamprofileBME *ManagedEntityDefinition
+
+// XdslSubcarrierMaskingDownstreamProfile (class ID #108)
+//	This ME contains the subcarrier masking downstream profile for an xDSL UNI. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, set-by-create) (mandatory) (2 bytes)
+//
+//		Downstream Subcarrier Mask 1
+//			Downstream subcarrier mask 1: Subcarriers 1 to 128. (R, W, set-by-create) (mandatory) (16 bytes)
+//
+//		Downstream Subcarrier Mask 2
+//			Downstream subcarrier mask 2: Subcarriers 129 to 256. (R, W) (mandatory for modems that support
+//			NSCds > 128) (16 bytes)
+//
+//		Downstream Subcarrier Mask 3
+//			Downstream subcarrier mask 3: Subcarriers 257 to 384. (R, W) (mandatory for modems that support
+//			NSCds > 256) (16 bytes)
+//
+//		Downstream Subcarrier Mask 4
+//			Downstream subcarrier mask 4: Subcarriers 385 to 512. (R, W) (mandatory for modems that support
+//			NSCds > 384) (16 bytes)
+//
+//		Mask Valid
+//			(R, W) (mandatory) (1 byte)
+//
+type XdslSubcarrierMaskingDownstreamProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslsubcarriermaskingdownstreamprofileBME = &ManagedEntityDefinition{
+		Name:    "XdslSubcarrierMaskingDownstreamProfile",
+		ClassID: 108,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: MultiByteField("DownstreamSubcarrierMask1", 16, nil, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+			2: MultiByteField("DownstreamSubcarrierMask2", 16, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 2),
+			3: MultiByteField("DownstreamSubcarrierMask3", 16, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 3),
+			4: MultiByteField("DownstreamSubcarrierMask4", 16, nil, mapset.NewSetWith(Read, Write), false, false, false, false, 4),
+			5: ByteField("MaskValid", 0, mapset.NewSetWith(Read, Write), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewXdslSubcarrierMaskingDownstreamProfile (class ID 108) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslSubcarrierMaskingDownstreamProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslsubcarriermaskingdownstreamprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingupstreamprofile.go b/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingupstreamprofile.go
new file mode 100644
index 0000000..76d3dd0
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslsubcarriermaskingupstreamprofile.go
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslSubcarrierMaskingUpstreamProfileClassId ClassID = ClassID(109)
+
+var xdslsubcarriermaskingupstreamprofileBME *ManagedEntityDefinition
+
+// XdslSubcarrierMaskingUpstreamProfile (class ID #109)
+//	This ME contains the subcarrier masking upstream profile for an xDSL UNI. An instance of this ME
+//	is created and deleted by the OLT.
+//
+//	Relationships
+//		An instance of this ME may be associated with zero or more instances of the PPTP xDSL UNI part
+//		1.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The value 0 is
+//			reserved. (R, setbycreate) (mandatory) (2 bytes)
+//
+//		Upstream Subcarrier Mask
+//			Subcarrier number 1 is the lowest, and the number of xDSL subcarriers, upstream (NSCus) is the
+//			highest subcarrier that can be transmitted in the upstream direction. For [ITUT G.992.3],
+//			[ITUT G.992.4] and [ITUT G.992.5], it is defined in the corresponding Recommendation. For Annex
+//			A of [ITUT G.992.1] and [ITUT G.992.2], NSCus = 32 and for Annex B of [ITUT G.992.1], NSCus =
+//			64. (R, W, setbycreate) (mandatory) (8 bytes)
+//
+type XdslSubcarrierMaskingUpstreamProfile struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslsubcarriermaskingupstreamprofileBME = &ManagedEntityDefinition{
+		Name:    "XdslSubcarrierMaskingUpstreamProfile",
+		ClassID: 109,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0X8000,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: Uint64Field("UpstreamSubcarrierMask", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 1),
+		},
+	}
+}
+
+// NewXdslSubcarrierMaskingUpstreamProfile (class ID 109) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslSubcarrierMaskingUpstreamProfile(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslsubcarriermaskingupstreamprofileBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslxtu-cchannelperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xdslxtu-cchannelperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..0c501ca
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslxtu-cchannelperformancemonitoringhistorydata.go
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslXtuCChannelPerformanceMonitoringHistoryDataClassId ClassID = ClassID(114)
+
+var xdslxtucchannelperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XdslXtuCChannelPerformanceMonitoringHistoryData (class ID #114)
+//	This ME collects PM data of an xTUC to xTUR channel as seen from the xTU-C. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL bearer channel. Several instances may
+//		therefore be associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Corrected Blocks
+//			Corrected blocks: This attribute counts blocks received with errors that were corrected on this
+//			channel. (R) (mandatory) (4 bytes)
+//
+//		Uncorrected Blocks
+//			Uncorrected blocks: This attribute counts blocks received with uncorrectable errors on this
+//			channel. (R) (mandatory) (4 bytes)
+//
+//		Transmitted Blocks
+//			Transmitted blocks:	This attribute counts encoded blocks transmitted on this channel. (R)
+//			(mandatory) (4 bytes)
+//
+//		Received Blocks
+//			Received blocks: This attribute counts encoded blocks received on this channel. (R) (mandatory)
+//			(4 bytes)
+//
+//		Code Violations
+//			Code violations: This attribute counts CRC-8 anomalies in the bearer channel. (R) (mandatory)
+//			(2 bytes)
+//
+//		Forward Error Corrections
+//			Forward error corrections: This attribute counts FEC anomalies in the bearer channel. (R)
+//			(mandatory) (2 bytes)
+//
+type XdslXtuCChannelPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslxtucchannelperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XdslXtuCChannelPerformanceMonitoringHistoryData",
+		ClassID: 114,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0XFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("CorrectedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("UncorrectedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("TransmittedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("ReceivedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint16Field("CodeViolations", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint16Field("ForwardErrorCorrections", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewXdslXtuCChannelPerformanceMonitoringHistoryData (class ID 114) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from, or is about to be sent on, the wire.
+func NewXdslXtuCChannelPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslxtucchannelperformancemonitoringhistorydataBME, params...)
+}
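Per the Managed Entity ID description above, the instance identifier packs the bearer channel into the two most significant bits and reuses the parent PPTP xDSL UNI part 1 ME ID in the remaining 14 bits. A small illustrative sketch of that packing (the helper function is hypothetical, not library code):

package main

import "fmt"

// channelPMEntityID composes the ME ID as described above: the two MSBs of
// the first byte carry the bearer channel ID, and the remaining 14 bits
// mirror the parent PPTP xDSL UNI part 1 ME ID.
func channelPMEntityID(bearerChannel, pptpEntityID uint16) uint16 {
	return (bearerChannel&0x3)<<14 | (pptpEntityID & 0x3FFF)
}

func main() {
	fmt.Printf("ME ID = 0x%04X\n", channelPMEntityID(1, 0x0101)) // 0x4101
}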
diff --git a/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..305f465
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydata.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslXtuCPerformanceMonitoringHistoryDataClassId ClassID = ClassID(112)
+
+var xdslxtucperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XdslXtuCPerformanceMonitoringHistoryData (class ID #112)
+//	This ME collects PM data on the xTUC to xTUR path as seen from the xTU-C. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Loss Of Frame Seconds
+//			Loss of frame seconds: (R) (mandatory) (2 bytes)
+//
+//		Loss Of Signal Seconds
+//			Loss of signal seconds: (R) (mandatory) (2 bytes)
+//
+//		Loss Of Link Seconds
+//			Loss of link seconds: (R) (mandatory) (2 bytes)
+//
+//		Loss Of Power Seconds
+//			Loss of power seconds: (R) (mandatory) (2 bytes)
+//
+//		Errored Seconds Es
+//			Errored seconds (ES): This attribute counts 1 s intervals with one or more CRC8 anomalies summed
+//			over all received bearer channels, or one or more loss of signal (LOS) defects, or one or more
+//			SEF defects, or one or more LPR defects. (R) (mandatory) (2 bytes)
+//
+//		Severely Errored Seconds
+//			(R) (mandatory) (2 bytes)
+//
+//		Line Initializations
+//			Line initializations: This attribute counts the total number of full initializations attempted
+//			on the line, both successful and failed. (R) (mandatory) (2 bytes)
+//
+//		Failed Line Initializations
+//			Failed line initializations: This attribute counts the total number of failed full
+//			initializations during the accumulation period. A failed full initialization occurs when
+//			showtime is not reached at the end of the full initialization procedure. (R) (mandatory)
+//			(2 bytes)
+//
+//		Short Initializations
+//			Short initializations: This attribute counts the total number of fast retrains or short
+//			initializations attempted on the line, successful and failed. Fast retrain is defined in [ITUT
+//			G.992.2]. Short initialization is defined in [ITUT G.992.3] and [ITUT G.992.4]. (R) (optional)
+//			(2 bytes)
+//
+//		Failed Short Initializations
+//			(R) (optional) (2 bytes)
+//
+//		Fec Seconds
+//			FEC seconds: This attribute counts seconds during which there was an FEC anomaly. (R)
+//			(mandatory) (2 bytes)
+//
+//		Unavailable Seconds
+//			Unavailable seconds: This attribute counts 1 s intervals during which the xDSL UNI is
+//			unavailable. The line becomes unavailable at the onset of 10 contiguous SES-Ls. The 10 SES-Ls
+//			are included in unavailable time. Once unavailable, the line becomes available at the onset of
+//			10 contiguous seconds that are not severely errored. The 10 s with no SESLs are excluded from
+//			unavailable time. Some attribute counts are inhibited during unavailability – see clause
+//			7.2.7.13 of [ITUT G.997.1]. (R) (mandatory) (2 bytes)
+//
+//		Sos Success Count, Near End
+//			SOS success count, near end: The SOS-SUCCESS-NE attribute is a count of the total number of
+//			successful SOS procedures initiated by the near-end xTU on the line during the accumulation
+//			period. Successful SOS is defined in clause 12.1.4 of [ITUT G.993.2]. (R) (optional) (2 bytes)
+//
+//		Sos Success Count, Far End
+//			SOS success count, far end: The SOS-SUCCESS-FE attribute is a count of the total number of
+//			successful SOS procedures initiated by the far-end xTU on the line during the accumulation
+//			period. Successful SOS is defined in clause 12.1.4 of [ITUT G.993.2]. (R) (optional) (2 bytes)
+//
+type XdslXtuCPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslxtucperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XdslXtuCPerformanceMonitoringHistoryData",
+		ClassID: 112,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("LossOfFrameSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint16Field("LossOfSignalSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint16Field("LossOfLinkSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint16Field("LossOfPowerSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint16Field("ErroredSecondsEs", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("SeverelyErroredSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint16Field("LineInitializations", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint16Field("FailedLineInitializations", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint16Field("ShortInitializations", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: Uint16Field("FailedShortInitializations", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+			13: Uint16Field("FecSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 13),
+			14: Uint16Field("UnavailableSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 14),
+			15: Uint16Field("SosSuccessCount,NearEnd", 0, mapset.NewSetWith(Read), false, false, true, false, 15),
+			16: Uint16Field("SosSuccessCount,FarEnd", 0, mapset.NewSetWith(Read), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewXdslXtuCPerformanceMonitoringHistoryData (class ID 112) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXdslXtuCPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslxtucperformancemonitoringhistorydataBME, params...)
+}
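
Each of the generated files above follows the same pattern: a package-level *ManagedEntityDefinition assembled in init() and a New<Name> constructor that hands it to NewManagedEntity. The sketch below shows one way a caller might use such a constructor; it omits the variadic ParamData argument (whose fields are not visible in this diff), and the nil-result check is an assumed convention rather than a documented one.

```go
package main

import (
	"fmt"

	me "github.com/cboling/omci/generated"
)

func main() {
	// Build the class-112 ME definition. In real use a ParamData carrying the
	// instance ID of the associated PPTP xDSL UNI would presumably be passed.
	entity, omciErr := me.NewXdslXtuCPerformanceMonitoringHistoryData()
	if entity == nil {
		fmt.Printf("could not build ME (class %v): %v\n",
			me.XdslXtuCPerformanceMonitoringHistoryDataClassId, omciErr)
		return
	}
	fmt.Printf("built ME definition for class %v\n",
		me.XdslXtuCPerformanceMonitoringHistoryDataClassId)
}
```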
diff --git a/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydatapart2.go b/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydatapart2.go
new file mode 100644
index 0000000..ebc8698
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslxtu-cperformancemonitoringhistorydatapart2.go
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslXtuCPerformanceMonitoringHistoryDataPart2ClassId ClassID = ClassID(408)
+
+var xdslxtucperformancemonitoringhistorydatapart2BME *ManagedEntityDefinition
+
+// XdslXtuCPerformanceMonitoringHistoryDataPart2 (class ID #408)
+//	This ME collects PM data on the xTUC to xTUR path as seen from the xTU-C. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 and 2 MEs
+//			that contain PM threshold values. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Leftr Defect Seconds
+//			"leftr" defect seconds: If retransmission is used, this attribute is a count of the seconds with
+//			a near-end "leftr" defect present – see clause 7.2.1.1.6 of [ITU-T G.997.1]. (R) (mandatory)
+//			(2 bytes)
+//
+//		Error_Free Bits Counter
+//			Error-free bits counter: If retransmission is used, this attribute is a count of the number of
+//			error-free bits passed over the β1 reference point, divided by 2^16 – see clause 7.2.1.1.7 of
+//			[ITU-T G.997.1]. (R) (mandatory) (4 bytes)
+//
+//		Minimum Error_Free Throughput Mineftr
+//			Minimum error-free throughput (MINEFTR): If retransmission is used, this attribute is the
+//			minimum error-free throughput in bits per second – see clause 7.2.1.1.8 of [ITUT G.997.1]. (R)
+//			(mandatory) (4 bytes)
+//
+type XdslXtuCPerformanceMonitoringHistoryDataPart2 struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslxtucperformancemonitoringhistorydatapart2BME = &ManagedEntityDefinition{
+		Name:    "XdslXtuCPerformanceMonitoringHistoryDataPart2",
+		ClassID: 408,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xF800,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint16Field("LeftrDefectSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("ErrorFreeBitsCounter", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("MinimumErrorFreeThroughputMineftr", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+		},
+	}
+}
+
+// NewXdslXtuCPerformanceMonitoringHistoryDataPart2 (class ID 408) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXdslXtuCPerformanceMonitoringHistoryDataPart2(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslxtucperformancemonitoringhistorydatapart2BME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslxtu-rchannelperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xdslxtu-rchannelperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..32f1a01
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslxtu-rchannelperformancemonitoringhistorydata.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslXtuRChannelPerformanceMonitoringHistoryDataClassId ClassID = ClassID(115)
+
+var xdslxturchannelperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XdslXtuRChannelPerformanceMonitoringHistoryData (class ID #115)
+//	This ME collects PM data of the xTUC to xTUR channel as seen from the xTU-R. Instances of this
+//	ME are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL bearer channel. Several instances may
+//		therefore be associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. The two MSBs of
+//			the first byte are the bearer channel ID. Excluding the first 2 bits of the first byte, the
+//			remaining part of the ME ID is identical to that of this ME's parent PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Corrected Blocks
+//			Corrected blocks: This attribute counts blocks received with errors that were corrected on this
+//			channel. (R) (mandatory) (4 bytes)
+//
+//		Uncorrected Blocks
+//			Uncorrected blocks: This attribute counts blocks received with uncorrectable errors on this
+//			channel. (R) (mandatory) (4 bytes)
+//
+//		Transmitted Blocks
+//			Transmitted blocks: This attribute counts encoded blocks transmitted on this channel. (R)
+//			(mandatory) (4 bytes)
+//
+//		Received Blocks
+//			Received blocks: This attribute counts encoded blocks received on this channel. (R) (mandatory)
+//			(4 bytes)
+//
+//		Code Violations
+//			Code violations: This attribute counts FEBE anomalies reported in the downstream bearer channel.
+//			If the CRC is applied over multiple bearer channels, then each related FEBE anomaly increments
+//			each of the counters related to the individual bearer channels. (R) (mandatory) (2 bytes)
+//
+//		Forward Error Corrections
+//			Forward error corrections: This attribute counts FFEC anomalies reported in the downstream
+//			bearer channel. If FEC is applied over multiple bearer channels, each related FFEC anomaly
+//			increments each of the counters related to the individual bearer channels. (R) (mandatory)
+//			(2 bytes)
+//
+type XdslXtuRChannelPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslxturchannelperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XdslXtuRChannelPerformanceMonitoringHistoryData",
+		ClassID: 115,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("CorrectedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4: Uint32Field("UncorrectedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5: Uint32Field("TransmittedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6: Uint32Field("ReceivedBlocks", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7: Uint16Field("CodeViolations", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8: Uint16Field("ForwardErrorCorrections", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+		},
+	}
+}
+
+// NewXdslXtuRChannelPerformanceMonitoringHistoryData (class ID 115) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXdslXtuRChannelPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslxturchannelperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xdslxtu-rperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xdslxtu-rperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..bbe2613
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xdslxtu-rperformancemonitoringhistorydata.go
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XdslXtuRPerformanceMonitoringHistoryDataClassId ClassID = ClassID(113)
+
+var xdslxturperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XdslXtuRPerformanceMonitoringHistoryData (class ID #113)
+//	This ME collects PM data of the xTUC to xTUR path as seen from the xTU-R. Instances of this ME
+//	are created and deleted by the OLT.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an xDSL UNI.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the PPTP xDSL UNI part 1. (R,
+//			setbycreate) (mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, setbycreate) (mandatory) (2 bytes)
+//
+//		Loss Of Frame Seconds
+//			Loss of frame seconds: (R) (mandatory) (2 bytes)
+//
+//		Loss Of Signal Seconds
+//			Loss of signal seconds: (R) (mandatory) (2 bytes)
+//
+//		Loss Of Power Seconds
+//			Loss of power seconds: (R) (mandatory) (2 bytes)
+//
+//		Errored Seconds
+//			Errored seconds: This attribute counts 1 s intervals with one or more far end block error (FEBE)
+//			anomalies summed over all transmitted bearer channels, or one or more LOSFE defects, or one or
+//			more RDI defects, or one or more LPR-FE defects. (R) (mandatory) (2 bytes)
+//
+//		Severely Errored Seconds
+//			(R) (mandatory) (2 bytes)
+//
+//		Fec Seconds
+//			FEC seconds: This attribute counts seconds during which there was an FEC anomaly. (R)
+//			(mandatory) (2 bytes)
+//
+//		Unavailable Seconds
+//			(R) (mandatory) (2 bytes)
+//
+//		Leftr Defect Seconds
+//			"leftr" defect seconds: If retransmission is used, this parameter is a count of the seconds with
+//			a near-end "leftr" defect present – see clause 7.2.1.1.6 of [ITU-T G.997.1]. (R) (optional)
+//			(2 bytes)
+//
+//		Error_Free Bits Counter
+//			Error-free bits counter: If retransmission is used, this parameter is a count of the number of
+//			error-free bits passed over the β1 reference point, divided by 2^16 – see clause 7.2.1.1.7 of
+//			[ITU-T G.997.1]. (R) (optional) (4 bytes)
+//
+//		Minimum Error_Free Throughput Mineftr
+//			Minimum error-free throughput (MINEFTR): If retransmission is used, this parameter is the
+//			minimum error-free throughput in bits per second – see clause 7.2.1.1.8 of [ITUT G.997.1]. (R)
+//			(optional) (4 bytes)
+//
+type XdslXtuRPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xdslxturperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XdslXtuRPerformanceMonitoringHistoryData",
+		ClassID: 113,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFFF0,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint16Field("LossOfFrameSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 3),
+			4:  Uint16Field("LossOfSignalSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 4),
+			5:  Uint16Field("LossOfPowerSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 5),
+			6:  Uint16Field("ErroredSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint16Field("SeverelyErroredSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 7),
+			8:  Uint16Field("FecSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 8),
+			9:  Uint16Field("UnavailableSeconds", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint16Field("LeftrDefectSeconds", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint32Field("ErrorFreeBitsCounter", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: Uint32Field("MinimumErrorFreeThroughputMineftr", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+		},
+	}
+}
+
+// NewXdslXtuRPerformanceMonitoringHistoryData (class ID 113) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXdslXtuRPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xdslxturperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xg-pondownstreammanagementperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xg-pondownstreammanagementperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..e8facf8
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xg-pondownstreammanagementperformancemonitoringhistorydata.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XgPonDownstreamManagementPerformanceMonitoringHistoryDataClassId ClassID = ClassID(345)
+
+var xgpondownstreammanagementperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XgPonDownstreamManagementPerformanceMonitoringHistoryData (class ID #345)
+//	This ME collects PM data associated with the XG-PON TC layer. It collects counters associated
+//	with downstream PLOAM and OMCI messages.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an ANI-G.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the ANI-G. (R, set-by-create)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. Since no threshold value attribute number exceeds 7, a threshold
+//			data 2 ME is optional. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Ploam Message Integrity Check Mic Error Count
+//			PLOAM message integrity check (MIC) error count: This attribute counts MIC errors detected in
+//			downstream PLOAM messages, either directed to this ONU or broadcast to all ONUs. (R) (optional)
+//			(4 bytes)
+//
+//		Downstream Ploam Messages Count
+//			Downstream PLOAM messages count: This attribute counts PLOAM messages received, either directed
+//			to this ONU or broadcast to all ONUs. (R) (optional) (4 bytes)
+//
+//		Profile Messages Received
+//			Profile messages received: This attribute counts the number of profile messages received, either
+//			directed to this ONU or broadcast to all ONUs. In [ITU-T G.9807.1], this attribute is used for
+//			received burst_profile message count.  (R) (optional) (4 bytes)
+//
+//		Ranging_Time Messages Received
+//			Ranging_time messages received: This attribute counts the number of ranging_time messages
+//			received, either directed to this ONU or broadcast to all ONUs. (R) (mandatory) (4 bytes)
+//
+//		Deactivate_Onu_Id Messages Received
+//			Deactivate_ONU-ID messages received: This attribute counts the number of deactivate_ONU-ID
+//			messages received, either directed to this ONU or broadcast to all ONUs. Deactivate_ONU-ID
+//			messages do not reset this counter. (R) (optional) (4 bytes)
+//
+//		Disable_Serial_Number Messages Received
+//			Disable_serial_number messages received: This attribute counts the number of
+//			disable_serial_number messages received, whose serial number specified this ONU. (R) (optional)
+//			(4 bytes)
+//
+//		Request_Registration Messages Received
+//			Request_registration messages received: This attribute counts the number of request_registration
+//			messages received. (R) (optional) (4 bytes)
+//
+//		Assign_Alloc_Id Messages Received
+//			Assign_alloc-ID messages received: This attribute counts the number of assign_alloc-ID messages
+//			received. (R) (optional) (4 bytes)
+//
+//		Key_Control Messages Received
+//			Key_control messages received: This attribute counts the number of key_control messages
+//			received, either directed to this ONU or broadcast to all ONUs. (R) (optional) (4 bytes)
+//
+//		Sleep_Allow Messages Received
+//			Sleep_allow messages received: This attribute counts the number of sleep_allow messages
+//			received, either directed to this ONU or broadcast to all ONUs. (R) (optional) (4 bytes)
+//
+//		Baseline Omci Messages Received Count
+//			Baseline OMCI messages received count: This attribute counts the number of OMCI messages
+//			received in the baseline message format. (R) (optional) (4 bytes)
+//
+//		Extended Omci Messages Received Count
+//			Extended OMCI messages received count: This attribute counts the number of OMCI messages
+//			received in the extended message format. (R) (optional) (4 bytes)
+//
+//		Assign_Onu_Id Messages Received
+//			Assign_ONU-ID messages received: This attribute counts the number of assign_ONU-ID messages
+//			received since the last re-boot. (R) (optional) (4 bytes)
+//
+//		Omci Mic Error Count
+//			OMCI MIC error count: This attribute counts MIC errors detected in OMCI messages directed to
+//			this ONU. (R) (optional) (4 bytes)
+//
+type XgPonDownstreamManagementPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xgpondownstreammanagementperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XgPonDownstreamManagementPerformanceMonitoringHistoryData",
+		ClassID: 345,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFFFF,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("PloamMessageIntegrityCheckMicErrorCount", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  Uint32Field("DownstreamPloamMessagesCount", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5:  Uint32Field("ProfileMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  Uint32Field("RangingTimeMessagesReceived", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("DeactivateOnuIdMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8:  Uint32Field("DisableSerialNumberMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint32Field("RequestRegistrationMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 9),
+			10: Uint32Field("AssignAllocIdMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 10),
+			11: Uint32Field("KeyControlMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 11),
+			12: Uint32Field("SleepAllowMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+			13: Uint32Field("BaselineOmciMessagesReceivedCount", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint32Field("ExtendedOmciMessagesReceivedCount", 0, mapset.NewSetWith(Read), false, false, true, false, 14),
+			15: Uint32Field("AssignOnuIdMessagesReceived", 0, mapset.NewSetWith(Read), false, false, true, false, 15),
+			16: Uint32Field("OmciMicErrorCount", 0, mapset.NewSetWith(Read), false, false, true, false, 16),
+		},
+	}
+}
+
+// NewXgPonDownstreamManagementPerformanceMonitoringHistoryData (class ID 345) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXgPonDownstreamManagementPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xgpondownstreammanagementperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/generated/xg-pontcperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xg-pontcperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..d22c7bb
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xg-pontcperformancemonitoringhistorydata.go
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XgPonTcPerformanceMonitoringHistoryDataClassId ClassID = ClassID(344)
+
+var xgpontcperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XgPonTcPerformanceMonitoringHistoryData (class ID #344)
+//	This ME collects PM data associated with the XG-PON TC layer.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an ANI-G.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the ANI-G. (R, set-by-create)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: This attribute points to an instance of the threshold data 1 ME that
+//			contains PM threshold values. (R, W, set-by-create) (mandatory) (2 bytes)
+//
+//		Psbd Hec Error Count
+//			PSBd HEC error count: This attribute counts HEC errors in any of the fields of the downstream
+//			physical sync block. (R) (optional) (4 bytes)
+//
+//		Xgtc Hec Error Count
+//			XGTC HEC error count: This attribute counts HEC errors detected in the XGTC header. In [ITU-T
+//			G.9807.1], this attribute is used for framing sublayer (FS) HEC error count management. (R)
+//			(optional) (4 bytes)
+//
+//		Unknown Profile Count
+//			Unknown profile count: This attribute counts the number of grants received whose specified
+//			profile was not known to the ONU. (R) (optional) (4 bytes)
+//
+//		Transmitted Xg_Pon Encapsulation Method Xgem Frames
+//			Transmitted XG-PON encapsulation method (XGEM) frames: This attribute counts the number of non-
+//			idle XGEM frames transmitted. If a service data unit (SDU) is fragmented, each fragment is an
+//			XGEM frame and is counted as such. (R) (mandatory) (4 bytes)
+//
+//		Fragment Xgem Frames
+//			Fragment XGEM frames: This attribute counts the number of XGEM frames that represent fragmented
+//			SDUs, as indicated by the LF bit = 0. (R) (optional) (4 bytes)
+//
+//		Xgem Hec Lost Words Count
+//			XGEM HEC lost words count: This attribute counts the number of 4 byte words lost because of an
+//			XGEM frame HEC error. In general, all XGTC payload following the error is lost, until the next
+//			PSBd event. (R) (optional) (4 bytes)
+//
+//		Xgem Key Errors
+//			(R) (mandatory) (4 bytes)
+//
+//		Xgem Hec Error Count
+//			XGEM HEC error count: This attribute counts the number of instances of an XGEM frame HEC error.
+//			(R) (mandatory) (4 bytes)
+//
+//		Transmitted Bytes In Non_Idle Xgem Frames
+//			Transmitted bytes in non-idle XGEM frames: This attribute counts the number of transmitted bytes
+//			in non-idle XGEM frames. (R) (mandatory) (8 bytes)
+//
+//		Received Bytes In Non_Idle Xgem Frames
+//			Received bytes in non-idle XGEM frames: This attribute counts the number of received bytes in
+//			non-idle XGEM frames. (R) (optional) (8 bytes)
+//
+//		Loss Of Downstream Synchronization Lods Event Count
+//			Loss of downstream synchronization (LODS) event count: This attribute counts the number of state
+//			transitions from O5.1 to O6. (R) (optional) (4 bytes)
+//
+//		Lods Event Restored Count
+//			LODS event restored count: This attribute counts the number of LODS cleared events. (R)
+//			(optional) (4 bytes)
+//
+//		Onu Reactivation By Lods Events
+//			ONU reactivation by LODS events: This attribute counts the number of LODS events resulting in
+//			ONU reactivation without synchronization being reacquired. (R) (optional) (4 bytes)
+//
+type XgPonTcPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xgpontcperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XgPonTcPerformanceMonitoringHistoryData",
+		ClassID: 344,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFFFE,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0:  Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1:  ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2:  Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3:  Uint32Field("PsbdHecErrorCount", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4:  Uint32Field("XgtcHecErrorCount", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5:  Uint32Field("UnknownProfileCount", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6:  Uint32Field("TransmittedXgPonEncapsulationMethodXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 6),
+			7:  Uint32Field("FragmentXgemFrames", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8:  Uint32Field("XgemHecLostWordsCount", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+			9:  Uint32Field("XgemKeyErrors", 0, mapset.NewSetWith(Read), false, false, false, false, 9),
+			10: Uint32Field("XgemHecErrorCount", 0, mapset.NewSetWith(Read), false, false, false, false, 10),
+			11: Uint64Field("TransmittedBytesInNonIdleXgemFrames", 0, mapset.NewSetWith(Read), false, false, false, false, 11),
+			12: Uint64Field("ReceivedBytesInNonIdleXgemFrames", 0, mapset.NewSetWith(Read), false, false, true, false, 12),
+			13: Uint32Field("LossOfDownstreamSynchronizationLodsEventCount", 0, mapset.NewSetWith(Read), false, false, true, false, 13),
+			14: Uint32Field("LodsEventRestoredCount", 0, mapset.NewSetWith(Read), false, false, true, false, 14),
+			15: Uint32Field("OnuReactivationByLodsEvents", 0, mapset.NewSetWith(Read), false, false, true, false, 15),
+		},
+	}
+}
+
+// NewXgPonTcPerformanceMonitoringHistoryData (class ID 344) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXgPonTcPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xgpontcperformancemonitoringhistorydataBME, params...)
+}
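
The AllowedAttributeMask values in these generated definitions (0xFFFE here, and 0xFFFF, 0xFFF0, 0xFF00, 0xF800 in the neighbouring files) are 16-bit masks in which bit 15 stands for attribute 1 and bit 0 for attribute 16; the managed entity ID at index 0 is not represented. The standalone sketch below is not part of the library; it simply reproduces those mask values for a definition whose attributes are numbered 1..n.

```go
package main

import "fmt"

// maskFor returns the 16-bit attribute mask covering attributes 1..n,
// with attribute 1 in the most significant bit.
func maskFor(n int) uint16 {
	var mask uint16
	for attr := 1; attr <= n; attr++ {
		mask |= 1 << uint(16-attr)
	}
	return mask
}

func main() {
	fmt.Printf("16 attributes -> %#04x\n", maskFor(16)) // 0xffff
	fmt.Printf("15 attributes -> %#04x\n", maskFor(15)) // 0xfffe (this file)
	fmt.Printf(" 5 attributes -> %#04x\n", maskFor(5))  // 0xf800 (part 2 file)
}
```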
diff --git a/vendor/github.com/cboling/omci/generated/xg-ponupstreammanagementperformancemonitoringhistorydata.go b/vendor/github.com/cboling/omci/generated/xg-ponupstreammanagementperformancemonitoringhistorydata.go
new file mode 100644
index 0000000..9c8fda5
--- /dev/null
+++ b/vendor/github.com/cboling/omci/generated/xg-ponupstreammanagementperformancemonitoringhistorydata.go
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package generated
+
+import "github.com/deckarep/golang-set"
+
+const XgPonUpstreamManagementPerformanceMonitoringHistoryDataClassId ClassID = ClassID(346)
+
+var xgponupstreammanagementperformancemonitoringhistorydataBME *ManagedEntityDefinition
+
+// XgPonUpstreamManagementPerformanceMonitoringHistoryData (class ID #346)
+//	This ME collects PM data associated with the XG-PON TC layer. It counts upstream PLOAM messages
+//	transmitted by the ONU.
+//
+//	For a complete discussion of generic PM architecture, refer to clause I.4.
+//
+//	Relationships
+//		An instance of this ME is associated with an ANI-G.
+//
+//	Attributes
+//		Managed Entity Id
+//			Managed entity ID: This attribute uniquely identifies each instance of this ME. Through an
+//			identical ID, this ME is implicitly linked to an instance of the ANI-G. (R, set-by-create)
+//			(mandatory) (2 bytes)
+//
+//		Interval End Time
+//			Interval end time: This attribute identifies the most recently finished 15 min interval. (R)
+//			(mandatory) (1 byte)
+//
+//		Threshold Data 1_2 Id
+//			Threshold data 1/2 ID: No thresholds are defined for this ME. For uniformity with other PM, the
+//			attribute is retained and shown as mandatory, but it should be set to a null pointer. (R, W,
+//			set-by-create) (mandatory) (2 bytes)
+//
+//		Upstream Ploam Message Count
+//			Upstream PLOAM message count: This attribute counts PLOAM messages transmitted upstream,
+//			excluding acknowledge messages. (R) (optional) (4 bytes)
+//
+//		Serial_Number_Onu Message Count
+//			Serial_number_ONU message count: This attribute counts Serial_number_ONU PLOAM messages
+//			transmitted. (R) (optional) (4 bytes)
+//
+//		Registration Message Count
+//			Registration message count: This attribute counts Registration PLOAM messages transmitted. (R)
+//			(optional) (4 bytes)
+//
+//		Key_Report Message Count
+//			Key_report message count: This attribute counts key_report PLOAM messages transmitted. (R)
+//			(optional) (4 bytes)
+//
+//		Acknowledge Message Count
+//			Acknowledge message count: This attribute counts acknowledge PLOAM messages transmitted. It
+//			includes all forms of acknowledgement (AK), including those transmitted in response to a PLOAM
+//			grant when the ONU has nothing to send. (R) (optional) (4 bytes)
+//
+//		Sleep_Request Message Count
+//			Sleep_request message count: This attribute counts sleep_request PLOAM messages transmitted. (R)
+//			(optional) (4 bytes)
+//
+type XgPonUpstreamManagementPerformanceMonitoringHistoryData struct {
+	ManagedEntityDefinition
+	Attributes AttributeValueMap
+}
+
+func init() {
+	xgponupstreammanagementperformancemonitoringhistorydataBME = &ManagedEntityDefinition{
+		Name:    "XgPonUpstreamManagementPerformanceMonitoringHistoryData",
+		ClassID: 346,
+		MessageTypes: mapset.NewSetWith(
+			Create,
+			Delete,
+			Get,
+			Set,
+		),
+		AllowedAttributeMask: 0xFF00,
+		AttributeDefinitions: AttributeDefinitionMap{
+			0: Uint16Field("ManagedEntityId", 0, mapset.NewSetWith(Read, SetByCreate), false, false, false, false, 0),
+			1: ByteField("IntervalEndTime", 0, mapset.NewSetWith(Read), false, false, false, false, 1),
+			2: Uint16Field("ThresholdData12Id", 0, mapset.NewSetWith(Read, SetByCreate, Write), false, false, false, false, 2),
+			3: Uint32Field("UpstreamPloamMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 3),
+			4: Uint32Field("SerialNumberOnuMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 4),
+			5: Uint32Field("RegistrationMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 5),
+			6: Uint32Field("KeyReportMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 6),
+			7: Uint32Field("AcknowledgeMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 7),
+			8: Uint32Field("SleepRequestMessageCount", 0, mapset.NewSetWith(Read), false, false, true, false, 8),
+		},
+	}
+}
+
+// NewXgPonUpstreamManagementPerformanceMonitoringHistoryData (class ID 346) creates the basic
+// Managed Entity definition that is used to validate an ME of this type that
+// is received from the wire or is about to be sent on the wire.
+func NewXgPonUpstreamManagementPerformanceMonitoringHistoryData(params ...ParamData) (*ManagedEntity, OmciErrors) {
+	return NewManagedEntity(xgponupstreammanagementperformancemonitoringhistorydataBME, params...)
+}
diff --git a/vendor/github.com/cboling/omci/layers.go b/vendor/github.com/cboling/omci/layers.go
new file mode 100644
index 0000000..f895f14
--- /dev/null
+++ b/vendor/github.com/cboling/omci/layers.go
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package omci
+
+import (
+	"errors"
+	me "github.com/cboling/omci/generated"
+	"github.com/google/gopacket"
+)
+
+var nextLayerMapping map[MessageType]gopacket.LayerType
+
+var (
+	LayerTypeCreateRequest                gopacket.LayerType
+	LayerTypeDeleteRequest                gopacket.LayerType
+	LayerTypeSetRequest                   gopacket.LayerType
+	LayerTypeGetRequest                   gopacket.LayerType
+	LayerTypeGetAllAlarmsRequest          gopacket.LayerType
+	LayerTypeGetAllAlarmsNextRequest      gopacket.LayerType
+	LayerTypeMibUploadRequest             gopacket.LayerType
+	LayerTypeMibUploadNextRequest         gopacket.LayerType
+	LayerTypeMibResetRequest              gopacket.LayerType
+	LayerTypeTestRequest                  gopacket.LayerType
+	LayerTypeStartSoftwareDownloadRequest gopacket.LayerType
+	LayerTypeDownloadSectionRequest       gopacket.LayerType
+	LayerTypeDownloadSectionLastRequest   gopacket.LayerType
+	LayerTypeEndSoftwareDownloadRequest   gopacket.LayerType
+	LayerTypeActivateSoftwareRequest      gopacket.LayerType
+	LayerTypeCommitSoftwareRequest        gopacket.LayerType
+	LayerTypeSynchronizeTimeRequest       gopacket.LayerType
+	LayerTypeRebootRequest                gopacket.LayerType
+	LayerTypeGetNextRequest               gopacket.LayerType
+	LayerTypeGetCurrentDataRequest        gopacket.LayerType
+	LayerTypeSetTableRequest              gopacket.LayerType
+)
+var (
+	LayerTypeCreateResponse                gopacket.LayerType
+	LayerTypeDeleteResponse                gopacket.LayerType
+	LayerTypeSetResponse                   gopacket.LayerType
+	LayerTypeGetResponse                   gopacket.LayerType
+	LayerTypeGetAllAlarmsResponse          gopacket.LayerType
+	LayerTypeGetAllAlarmsNextResponse      gopacket.LayerType
+	LayerTypeMibUploadResponse             gopacket.LayerType
+	LayerTypeMibUploadNextResponse         gopacket.LayerType
+	LayerTypeMibResetResponse              gopacket.LayerType
+	LayerTypeAlarmNotification             gopacket.LayerType
+	LayerTypeAttributeValueChange          gopacket.LayerType
+	LayerTypeTestResponse                  gopacket.LayerType
+	LayerTypeStartSoftwareDownloadResponse gopacket.LayerType
+	LayerTypeDownloadSectionResponse       gopacket.LayerType
+	LayerTypeEndSoftwareDownloadResponse   gopacket.LayerType
+	LayerTypeActivateSoftwareResponse      gopacket.LayerType
+	LayerTypeCommitSoftwareResponse        gopacket.LayerType
+	LayerTypeSynchronizeTimeResponse       gopacket.LayerType
+	LayerTypeRebootResponse                gopacket.LayerType
+	LayerTypeGetNextResponse               gopacket.LayerType
+	LayerTypeTestResult                    gopacket.LayerType
+	LayerTypeGetCurrentDataResponse        gopacket.LayerType
+	LayerTypeSetTableResponse              gopacket.LayerType
+)
+
+func mkReqLayer(mt me.MsgType, mts string, decode gopacket.DecodeFunc) gopacket.LayerType {
+	return gopacket.RegisterLayerType(1000+(int(mt)|int(me.AR)),
+		gopacket.LayerTypeMetadata{Name: mts, Decoder: decode})
+}
+
+func mkRespLayer(mt me.MsgType, mts string, decode gopacket.DecodeFunc) gopacket.LayerType {
+	return gopacket.RegisterLayerType(1000+(int(mt)|int(me.AK)),
+		gopacket.LayerTypeMetadata{Name: mts, Decoder: decode})
+}
+
+func mkLayer(mt me.MsgType, mts string, decode gopacket.DecodeFunc) gopacket.LayerType {
+	return gopacket.RegisterLayerType(1000+(int(mt)),
+		gopacket.LayerTypeMetadata{Name: mts, Decoder: decode})
+}
+
+func init() {
+	// Create layers for message_type & action
+	LayerTypeCreateRequest = mkReqLayer(me.Create, "CreateRequest", gopacket.DecodeFunc(decodeCreateRequest))
+	LayerTypeDeleteRequest = mkReqLayer(me.Delete, "DeleteRequest", gopacket.DecodeFunc(decodeDeleteRequest))
+	LayerTypeSetRequest = mkReqLayer(me.Set, "SetRequest", gopacket.DecodeFunc(decodeSetRequest))
+	LayerTypeGetRequest = mkReqLayer(me.Get, "GetRequest", gopacket.DecodeFunc(decodeGetRequest))
+	LayerTypeGetAllAlarmsRequest = mkReqLayer(me.GetAllAlarms, "GetAllAlarmsRequest", gopacket.DecodeFunc(decodeGetAllAlarmsRequest))
+	LayerTypeGetAllAlarmsNextRequest = mkReqLayer(me.GetAllAlarmsNext, "GetAllAlarmsNextRequest", gopacket.DecodeFunc(decodeGetAllAlarmsNextRequest))
+	LayerTypeMibUploadRequest = mkReqLayer(me.MibUpload, "MibUploadRequest", gopacket.DecodeFunc(decodeMibUploadRequest))
+	LayerTypeMibUploadNextRequest = mkReqLayer(me.MibUploadNext, "MibUploadNextRequest", gopacket.DecodeFunc(decodeMibUploadNextRequest))
+	LayerTypeMibResetRequest = mkReqLayer(me.MibReset, "MibResetRequest", gopacket.DecodeFunc(decodeMibResetRequest))
+	LayerTypeTestRequest = mkReqLayer(me.Test, "TestRequest", gopacket.DecodeFunc(decodeTestRequest))
+	LayerTypeStartSoftwareDownloadRequest = mkReqLayer(me.StartSoftwareDownload, "StartSoftwareDownloadRequest", gopacket.DecodeFunc(decodeStartSoftwareDownloadRequest))
+
+	// For Download section, AR=0 if not response expected, AR=1 if response expected (last section of a window)
+	LayerTypeDownloadSectionRequest = mkLayer(me.DownloadSection, "DownloadSectionRequest", gopacket.DecodeFunc(decodeDownloadSectionRequest))
+	LayerTypeDownloadSectionLastRequest = mkReqLayer(me.DownloadSection, "DownloadLastSectionRequest", gopacket.DecodeFunc(decodeDownloadSectionRequest))
+	LayerTypeEndSoftwareDownloadRequest = mkReqLayer(me.EndSoftwareDownload, "EndSoftwareDownloadRequest", gopacket.DecodeFunc(decodeEndSoftwareDownloadRequest))
+	LayerTypeActivateSoftwareRequest = mkReqLayer(me.ActivateSoftware, "ActivateSoftwareRequest", gopacket.DecodeFunc(decodeActivateSoftwareRequest))
+	LayerTypeCommitSoftwareRequest = mkReqLayer(me.CommitSoftware, "CommitSoftwareRequest", gopacket.DecodeFunc(decodeCommitSoftwareRequest))
+	LayerTypeSynchronizeTimeRequest = mkReqLayer(me.SynchronizeTime, "SynchronizeTimeRequest", gopacket.DecodeFunc(decodeSynchronizeTimeRequest))
+	LayerTypeRebootRequest = mkReqLayer(me.Reboot, "RebootRequest", gopacket.DecodeFunc(decodeRebootRequest))
+	LayerTypeGetNextRequest = mkReqLayer(me.GetNext, "GetNextRequest", gopacket.DecodeFunc(decodeGetNextRequest))
+	LayerTypeGetCurrentDataRequest = mkReqLayer(me.GetCurrentData, "GetCurrentDataRequest", gopacket.DecodeFunc(decodeGetCurrentDataRequest))
+	LayerTypeSetTableRequest = mkReqLayer(me.SetTable, "SetTableRequest", gopacket.DecodeFunc(decodeSetTableRequest))
+
+	LayerTypeCreateResponse = mkRespLayer(me.Create, "CreateResponse", gopacket.DecodeFunc(decodeCreateResponse))
+	LayerTypeDeleteResponse = mkRespLayer(me.Delete, "DeleteResponse", gopacket.DecodeFunc(decodeDeleteResponse))
+	LayerTypeSetResponse = mkRespLayer(me.Set, "SetResponse", gopacket.DecodeFunc(decodeSetResponse))
+	LayerTypeGetResponse = mkRespLayer(me.Get, "GetResponse", gopacket.DecodeFunc(decodeGetResponse))
+	LayerTypeGetAllAlarmsResponse = mkRespLayer(me.GetAllAlarms, "GetAllAlarmsResponse", gopacket.DecodeFunc(decodeGetAllAlarmsResponse))
+	LayerTypeGetAllAlarmsNextResponse = mkRespLayer(me.GetAllAlarmsNext, "GetAllAlarmsNextResponse", gopacket.DecodeFunc(decodeGetAllAlarmsNextResponse))
+	LayerTypeMibUploadResponse = mkRespLayer(me.MibUpload, "MibUploadResponse", gopacket.DecodeFunc(decodeMibUploadResponse))
+	LayerTypeMibUploadNextResponse = mkRespLayer(me.MibUploadNext, "MibUploadNextResponse", gopacket.DecodeFunc(decodeMibUploadNextResponse))
+	LayerTypeMibResetResponse = mkRespLayer(me.MibReset, "MibResetResponse", gopacket.DecodeFunc(decodeMibResetResponse))
+	LayerTypeAlarmNotification = mkLayer(me.AlarmNotification, "AlarmNotification", gopacket.DecodeFunc(decodeAlarmNotification))
+	LayerTypeAttributeValueChange = mkLayer(me.AttributeValueChange, "AttributeValueChange", gopacket.DecodeFunc(decodeAttributeValueChange))
+	LayerTypeTestResponse = mkRespLayer(me.Test, "TestResponse", gopacket.DecodeFunc(decodeTestResponse))
+	LayerTypeStartSoftwareDownloadResponse = mkRespLayer(me.StartSoftwareDownload, "StartSoftwareDownloadResponse", gopacket.DecodeFunc(decodeStartSoftwareDownloadResponse))
+	LayerTypeDownloadSectionResponse = mkRespLayer(me.DownloadSection, "DownloadSectionResponse", gopacket.DecodeFunc(decodeDownloadSectionResponse))
+	LayerTypeEndSoftwareDownloadResponse = mkRespLayer(me.EndSoftwareDownload, "EndSoftwareDownloadResponse", gopacket.DecodeFunc(decodeEndSoftwareDownloadResponse))
+	LayerTypeActivateSoftwareResponse = mkRespLayer(me.ActivateSoftware, "ActivateSoftwareResponse", gopacket.DecodeFunc(decodeActivateSoftwareResponse))
+	LayerTypeCommitSoftwareResponse = mkRespLayer(me.CommitSoftware, "CommitSoftwareResponse", gopacket.DecodeFunc(decodeCommitSoftwareResponse))
+	LayerTypeSynchronizeTimeResponse = mkRespLayer(me.SynchronizeTime, "SynchronizeTimeResponse", gopacket.DecodeFunc(decodeSynchronizeTimeResponse))
+	LayerTypeRebootResponse = mkRespLayer(me.Reboot, "RebootResponse", gopacket.DecodeFunc(decodeRebootResponse))
+	LayerTypeGetNextResponse = mkRespLayer(me.GetNext, "GetNextResponse", gopacket.DecodeFunc(decodeGetNextResponse))
+	LayerTypeTestResult = mkRespLayer(me.TestResult, "TestResult", gopacket.DecodeFunc(decodeTestResult))
+	LayerTypeGetCurrentDataResponse = mkRespLayer(me.GetCurrentData, "GetCurrentDataResponse", gopacket.DecodeFunc(decodeGetCurrentDataResponse))
+	LayerTypeSetTableResponse = mkRespLayer(me.SetTable, "SetTableResponse", gopacket.DecodeFunc(decodeSetTableResponse))
+
+	// Map message_type and action to layer
+	nextLayerMapping = make(map[MessageType]gopacket.LayerType)
+
+	nextLayerMapping[CreateRequestType] = LayerTypeCreateRequest
+	nextLayerMapping[DeleteRequestType] = LayerTypeDeleteRequest
+	nextLayerMapping[SetRequestType] = LayerTypeSetRequest
+	nextLayerMapping[GetRequestType] = LayerTypeGetRequest
+	nextLayerMapping[GetAllAlarmsRequestType] = LayerTypeGetAllAlarmsRequest
+	nextLayerMapping[GetAllAlarmsNextRequestType] = LayerTypeGetAllAlarmsNextRequest
+	nextLayerMapping[MibUploadRequestType] = LayerTypeMibUploadRequest
+	nextLayerMapping[MibUploadNextRequestType] = LayerTypeMibUploadNextRequest
+	nextLayerMapping[MibResetRequestType] = LayerTypeMibResetRequest
+	nextLayerMapping[TestRequestType] = LayerTypeTestRequest
+	nextLayerMapping[StartSoftwareDownloadRequestType] = LayerTypeStartSoftwareDownloadRequest
+	nextLayerMapping[DownloadSectionRequestType] = LayerTypeDownloadSectionRequest
+	nextLayerMapping[EndSoftwareDownloadRequestType] = LayerTypeEndSoftwareDownloadRequest
+	nextLayerMapping[ActivateSoftwareRequestType] = LayerTypeActivateSoftwareRequest
+	nextLayerMapping[CommitSoftwareRequestType] = LayerTypeCommitSoftwareRequest
+	nextLayerMapping[SynchronizeTimeRequestType] = LayerTypeSynchronizeTimeRequest
+	nextLayerMapping[RebootRequestType] = LayerTypeRebootRequest
+	nextLayerMapping[GetNextRequestType] = LayerTypeGetNextRequest
+	nextLayerMapping[GetCurrentDataRequestType] = LayerTypeGetCurrentDataRequest
+	nextLayerMapping[SetTableRequestType] = LayerTypeSetTableRequest
+
+	nextLayerMapping[CreateResponseType] = LayerTypeCreateResponse
+	nextLayerMapping[DeleteResponseType] = LayerTypeDeleteResponse
+	nextLayerMapping[SetResponseType] = LayerTypeSetResponse
+	nextLayerMapping[GetResponseType] = LayerTypeGetResponse
+	nextLayerMapping[GetAllAlarmsResponseType] = LayerTypeGetAllAlarmsResponse
+	nextLayerMapping[GetAllAlarmsNextResponseType] = LayerTypeGetAllAlarmsNextResponse
+	nextLayerMapping[MibUploadResponseType] = LayerTypeMibUploadResponse
+	nextLayerMapping[MibUploadNextResponseType] = LayerTypeMibUploadNextResponse
+	nextLayerMapping[MibResetResponseType] = LayerTypeMibResetResponse
+	nextLayerMapping[TestResponseType] = LayerTypeTestResponse
+	nextLayerMapping[StartSoftwareDownloadResponseType] = LayerTypeStartSoftwareDownloadResponse
+	nextLayerMapping[DownloadSectionResponseType] = LayerTypeDownloadSectionResponse
+	nextLayerMapping[EndSoftwareDownloadResponseType] = LayerTypeEndSoftwareDownloadResponse
+	nextLayerMapping[ActivateSoftwareResponseType] = LayerTypeActivateSoftwareResponse
+	nextLayerMapping[CommitSoftwareResponseType] = LayerTypeCommitSoftwareResponse
+	nextLayerMapping[SynchronizeTimeResponseType] = LayerTypeSynchronizeTimeResponse
+	nextLayerMapping[RebootResponseType] = LayerTypeRebootResponse
+	nextLayerMapping[GetNextResponseType] = LayerTypeGetNextResponse
+	nextLayerMapping[GetCurrentDataResponseType] = LayerTypeGetCurrentDataResponse
+	nextLayerMapping[SetTableResponseType] = LayerTypeSetTableResponse
+
+	nextLayerMapping[AttributeValueChangeType] = LayerTypeAttributeValueChange
+	nextLayerMapping[AlarmNotificationType] = LayerTypeAlarmNotification
+	nextLayerMapping[TestResultType] = LayerTypeTestResult
+}
+
+func MsgTypeToNextLayer(mt MessageType) (gopacket.LayerType, error) {
+	nextLayer, ok := nextLayerMapping[mt]
+	if ok {
+		return nextLayer, nil
+	}
+	return gopacket.LayerTypeZero, errors.New("unknown message type")
+}
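+
+// Illustrative decode-side sketch (not part of the library API): once the OMCI
+// header has been parsed, the MessageType can be mapped to the gopacket layer
+// that decodes the message contents, e.g.
+//
+//	if nextLayer, err := MsgTypeToNextLayer(CreateRequestType); err == nil {
+//		fmt.Println(nextLayer) // == LayerTypeCreateRequest
+//	}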
diff --git a/vendor/github.com/cboling/omci/mebase.go b/vendor/github.com/cboling/omci/mebase.go
new file mode 100644
index 0000000..7692bbf
--- /dev/null
+++ b/vendor/github.com/cboling/omci/mebase.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package omci
+
+import (
+	"encoding/binary"
+	"fmt"
+	me "github.com/cboling/omci/generated"
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+)
+
+type MeBasePacket struct {
+	EntityClass    me.ClassID
+	EntityInstance uint16
+
+	gopacket.Layer
+	layers.BaseLayer
+	MsgLayerType gopacket.LayerType
+}
+
+func (msg *MeBasePacket) String() string {
+	return fmt.Sprintf("ClassID: %v, InstanceId: %d/%#x",
+		msg.EntityClass, msg.EntityInstance, msg.EntityInstance)
+}
+
+func (msg *MeBasePacket) CanDecode() gopacket.LayerClass {
+	return msg.MsgLayerType
+}
+
+// Layer Interface implementations
+func (msg *MeBasePacket) LayerType() gopacket.LayerType {
+	return msg.MsgLayerType
+}
+func (msg *MeBasePacket) LayerContents() []byte {
+	return msg.Contents
+}
+func (msg *MeBasePacket) LayerPayload() []byte {
+	return msg.Payload
+}
+
+// layerDecodingLayer Interface implementations
+func (msg *MeBasePacket) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+func (msg *MeBasePacket) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Note: Base OMCI frame already checked for frame with at least 10 octets
+	msg.EntityClass = me.ClassID(binary.BigEndian.Uint16(data[0:]))
+	msg.EntityInstance = binary.BigEndian.Uint16(data[2:])
+	msg.BaseLayer = layers.BaseLayer{Contents: data[:4], Payload: data[4:]}
+	return nil
+}
+func (msg *MeBasePacket) SerializeTo(b gopacket.SerializeBuffer) error {
+	// Add class ID and entity ID
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(msg.EntityClass))
+	binary.BigEndian.PutUint16(bytes[2:], msg.EntityInstance)
+	return nil
+}
+
+type layerDecodingLayer interface {
+	gopacket.Layer
+	DecodeFromBytes([]byte, gopacket.PacketBuilder) error
+	NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	next := d.NextLayerType()
+	if next == gopacket.LayerTypeZero {
+		return nil
+	}
+	return p.NextDecoder(next)
+}
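+
+// A hedged sketch of how a message-specific decode function is expected to use the
+// helper above (decodeCreateRequest in messagetypes.go follows this exact pattern;
+// the name decodeExampleRequest is hypothetical):
+//
+//	func decodeExampleRequest(data []byte, p gopacket.PacketBuilder) error {
+//		msg := &CreateRequest{}
+//		msg.MsgLayerType = LayerTypeCreateRequest
+//		return decodingLayerDecoder(msg, data, p)
+//	}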
diff --git a/vendor/github.com/cboling/omci/meframe.go b/vendor/github.com/cboling/omci/meframe.go
new file mode 100644
index 0000000..c42e091
--- /dev/null
+++ b/vendor/github.com/cboling/omci/meframe.go
@@ -0,0 +1,1358 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * NOTE: This file was generated, manual edits will be overwritten!
+ *
+ * Generated by 'goCodeGenerator.py':
+ *              https://github.com/cboling/OMCI-parser/README.md
+ */
+package omci
+
+import (
+	"errors"
+	"fmt"
+	me "github.com/cboling/omci/generated"
+	"github.com/deckarep/golang-set"
+	"github.com/google/gopacket"
+	"time"
+)
+
+var encoderMap map[MessageType]func(*me.ManagedEntity, options) (gopacket.SerializableLayer, error)
+
+func init() {
+	encoderMap = make(map[MessageType]func(*me.ManagedEntity, options) (gopacket.SerializableLayer, error))
+
+	encoderMap[CreateRequestType] = CreateRequestFrame
+	encoderMap[DeleteRequestType] = DeleteRequestFrame
+	encoderMap[SetRequestType] = SetRequestFrame
+	encoderMap[GetRequestType] = GetRequestFrame
+	encoderMap[GetAllAlarmsRequestType] = GetAllAlarmsRequestFrame
+	encoderMap[GetAllAlarmsNextRequestType] = GetAllAlarmsNextRequestFrame
+	encoderMap[MibUploadRequestType] = MibUploadRequestFrame
+	encoderMap[MibUploadNextRequestType] = MibUploadNextRequestFrame
+	encoderMap[MibResetRequestType] = MibResetRequestFrame
+	encoderMap[TestRequestType] = TestRequestFrame
+	encoderMap[StartSoftwareDownloadRequestType] = StartSoftwareDownloadRequestFrame
+	encoderMap[DownloadSectionRequestType] = DownloadSectionRequestFrame
+	encoderMap[EndSoftwareDownloadRequestType] = EndSoftwareDownloadRequestFrame
+	encoderMap[ActivateSoftwareRequestType] = ActivateSoftwareRequestFrame
+	encoderMap[CommitSoftwareRequestType] = CommitSoftwareRequestFrame
+	encoderMap[SynchronizeTimeRequestType] = SynchronizeTimeRequestFrame
+	encoderMap[RebootRequestType] = RebootRequestFrame
+	encoderMap[GetNextRequestType] = GetNextRequestFrame
+	encoderMap[GetCurrentDataRequestType] = GetCurrentDataRequestFrame
+	encoderMap[SetTableRequestType] = SetTableRequestFrame
+	encoderMap[CreateResponseType] = CreateResponseFrame
+	encoderMap[DeleteResponseType] = DeleteResponseFrame
+	encoderMap[SetResponseType] = SetResponseFrame
+	encoderMap[GetResponseType] = GetResponseFrame
+	encoderMap[GetAllAlarmsResponseType] = GetAllAlarmsResponseFrame
+	encoderMap[GetAllAlarmsNextResponseType] = GetAllAlarmsNextResponseFrame
+	encoderMap[MibUploadResponseType] = MibUploadResponseFrame
+	encoderMap[MibUploadNextResponseType] = MibUploadNextResponseFrame
+	encoderMap[MibResetResponseType] = MibResetResponseFrame
+	encoderMap[TestResponseType] = TestResponseFrame
+	encoderMap[StartSoftwareDownloadResponseType] = StartSoftwareDownloadResponseFrame
+	encoderMap[DownloadSectionResponseType] = DownloadSectionResponseFrame
+	encoderMap[EndSoftwareDownloadResponseType] = EndSoftwareDownloadResponseFrame
+	encoderMap[ActivateSoftwareResponseType] = ActivateSoftwareResponseFrame
+	encoderMap[CommitSoftwareResponseType] = CommitSoftwareResponseFrame
+	encoderMap[SynchronizeTimeResponseType] = SynchronizeTimeResponseFrame
+	encoderMap[RebootResponseType] = RebootResponseFrame
+	encoderMap[GetNextResponseType] = GetNextResponseFrame
+	encoderMap[GetCurrentDataResponseType] = GetCurrentDataResponseFrame
+	encoderMap[SetTableResponseType] = SetTableResponseFrame
+	encoderMap[AlarmNotificationType] = AlarmNotificationFrame
+	encoderMap[AttributeValueChangeType] = AttributeValueChangeFrame
+	encoderMap[TestResultType] = TestResultFrame
+}
+
+type options struct {
+	frameFormat               DeviceIdent
+	failIfTruncated           bool
+	attributeMask             uint16
+	result                    me.Results      // Common for many responses
+	attrExecutionMask         uint16          // Create Response Only if results == 3 or Set Response only if results == 0
+	unsupportedMask           uint16          // Set Response only if results == 9
+	sequenceNumberCountOrSize uint16          // For get-next request frames and for frames that return number of commands or length
+	transactionID             uint16          // OMCI TID
+	mode                      uint8           // Get All Alarms retrieval mode
+	alarm                     AlarmOptions    // Alarm related frames
+	software                  SoftwareOptions // Software image related frames
+	payload                   interface{}     // ME or list of MEs, alarm bitmap, timestamp, ...
+}
+
+var defaultFrameOptions = options{
+	frameFormat:               BaselineIdent,
+	failIfTruncated:           false,
+	attributeMask:             0xFFFF,
+	result:                    me.Success,
+	attrExecutionMask:         0,
+	unsupportedMask:           0,
+	sequenceNumberCountOrSize: 0,
+	transactionID:             0,
+	mode:                      0,
+	software:                  defaultSoftwareOptions,
+	alarm:                     defaultAlarmOptions,
+	payload:                   nil,
+}
+
+// FrameOption sets options such as frame format, etc.
+type FrameOption func(*options)
+
+// FrameFormat determines the OMCI message format used on the fiber.
+// The default value is BaselineIdent.
+func FrameFormat(ff DeviceIdent) FrameOption {
+	return func(o *options) {
+		o.frameFormat = ff
+	}
+}
+
+// FailIfTruncated determines whether a request to encode a frame that does
+// not have enough room for all requested options should fail and return an
+// error.
+//
+// If set to 'false', the behaviour depends on the message type/operation
+// requested. The table below provides more information:
+//
+//   Request Type	Behaviour
+//	 ------------------------------------------------------------------------
+//	 CreateRequest  A single CreateRequest struct is always returned as the
+//                  CreateRequest message does not have an Attribute Mask
+//                  field and a Baseline OMCI message is large enough to
+//                  support all Set-By-Create attributes.
+//
+//   GetResponse	If multiple OMCI response frames are needed to return
+//					all requested attributes, only the attributes that can
+//					fit will be returned and the FailedAttributeMask field
+//					set to the attributes that could not be returned
+//
+//					If this is an ME with an attribute that is a table, the
+//					first GetResponse struct will return the size of the
+//					attribute and the following GetNextResponse structs will
+//					contain the attribute data. The ONU application is
+//					responsible for stashing these extra struct(s) away in
+//					anticipation of possible GetNext Requests occurring for
+//					the attribute.  See the discussion on Table attributes
+//					in the GetResponse section of ITU G.988 for more
+//					information.
+//
+// If set to 'true', no struct(s) are returned and an error is provided.
+//
+// The default value is 'false'
+func FailIfTruncated(f bool) FrameOption {
+	return func(o *options) {
+		o.failIfTruncated = f
+	}
+}
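+
+// Usage note (see GetResponseFrame later in this file): with FailIfTruncated(true),
+// an attribute that does not fit causes a MessageTruncatedError to be returned;
+// otherwise the attribute is recorded in the FailedAttributeMask and the Result is
+// changed to AttributeFailure.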
+
+// AttributeMask determines the attributes to encode into the frame.
+// The default value is 0xFFFF, which specifies all available attributes
+// in the frame.
+func AttributeMask(m uint16) FrameOption {
+	return func(o *options) {
+		o.attributeMask = m
+	}
+}
+
+// AttributeExecutionMask is used by the Create and Set Response frames to indicate
+// attributes that failed to be created/set.
+func AttributeExecutionMask(m uint16) FrameOption {
+	return func(o *options) {
+		o.attrExecutionMask = m
+	}
+}
+
+// UnsupportedAttributeMask is used by the Set Response frames to indicate
+// attributes that are not supported on this ONU
+func UnsupportedAttributeMask(m uint16) FrameOption {
+	return func(o *options) {
+		o.unsupportedMask = m
+	}
+}
+
+// Result is used to set returned results in responses
+// that have that field
+func Result(r me.Results) FrameOption {
+	return func(o *options) {
+		o.result = r
+	}
+}
+
+// SequenceNumberCountOrSize is used by the GetNext and MibUploadNext request frames and for
+// frames that return a number of commands or a length, such as Get (table attribute) or
+// MibUpload/GetAllAlarms/...
+func SequenceNumberCountOrSize(m uint16) FrameOption {
+	return func(o *options) {
+		o.sequenceNumberCountOrSize = m
+	}
+}
+
+// TransactionID is used to specify the TID in the OMCI header. The default is
+// zero, which requires the caller to set it to an appropriate value if this
+// is not an autonomous ONU notification frame
+func TransactionID(tid uint16) FrameOption {
+	return func(o *options) {
+		o.transactionID = tid
+	}
+}
+
+// RetrievalMode is used to specify the Alarm Retrieval Mode in a GetAllAlarms Request
+func RetrievalMode(m uint8) FrameOption {
+	return func(o *options) {
+		o.mode = m
+	}
+}
+
+// SuccessResult is used to specify the SuccessResult for a SynchronizeTime Response
+func SuccessResult(m uint8) FrameOption {
+	return func(o *options) {
+		o.mode = m
+	}
+}
+
+// RebootCondition is used to specify the Reboot Condition for an ONU Reboot request
+func RebootCondition(m uint8) FrameOption {
+	return func(o *options) {
+		o.mode = m
+	}
+}
+
+// Alarm is used to specify a collection of options related to Alarm notifications
+func Alarm(ao AlarmOptions) FrameOption {
+	return func(o *options) {
+		o.alarm = ao
+	}
+}
+
+// Software is used to specify a collection of options related to Software image
+// manipulation
+func Software(so SoftwareOptions) FrameOption {
+	return func(o *options) {
+		o.software = so
+	}
+}
+
+// Payload is used to specify ME payload options that are not simple types. These
+// include the ME (or list of MEs) to encode into a MibUploadNext response, the
+// alarm bitmap for alarm-related responses/notifications, and the download
+// section data when performing Software Download.
+func Payload(p interface{}) FrameOption {
+	return func(o *options) {
+		o.payload = p
+	}
+}
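+
+// Hedged examples of payload types this option is expected to carry (meInstance and
+// the timestamp are assumed example values, not library definitions):
+//
+//	Payload(meInstance)        // *me.ManagedEntity for a MibUploadNext response
+//	Payload(time.Now().Unix()) // int64 Unix time for a SynchronizeTime request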
+
+// Alarm related frames have a wide variety of settable values. Placing them
+// in a separate struct is mainly to keep the base options simple
+type AlarmOptions struct {
+	AlarmClassId  me.ClassID
+	AlarmInstance uint16
+	AlarmBitmap   []byte // Should be up to 58 octets
+}
+
+var defaultAlarmOptions = AlarmOptions{
+	AlarmClassId:  0,
+	AlarmInstance: 0,
+	AlarmBitmap:   nil,
+}
+
+// Software related frames have a wide variety of settable values. Placing them
+// in a separate struct is mainly to keep the base options simple
+type SoftwareOptions struct {
+	WindowSize   uint8 // Window size - 1
+	ImageSize    uint32
+	CircuitPacks []uint16 // slot (upper 8 bits) and instance (lower 8 bits)
+	Results      []downloadResults
+}
+
+var defaultSoftwareOptions = SoftwareOptions{
+	WindowSize:   0,
+	ImageSize:    0,
+	CircuitPacks: nil,
+	Results:      nil,
+}
+
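+// Hedged usage sketch for the software options consumed by the StartSoftwareDownload
+// frame encoders later in this file (all values below are illustrative only):
+//
+//	swOpts := SoftwareOptions{
+//		WindowSize:   31,               // window size - 1, i.e. 32 sections per window
+//		ImageSize:    1000000,          // image length in bytes
+//		CircuitPacks: []uint16{0x0001}, // slot 0, instance 1
+//	}
+//	omciLayer, msgLayer, err := EncodeFrame(meInstance,
+//		StartSoftwareDownloadRequestType, TransactionID(tid), Software(swOpts))
+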
+// EncodeFrame will encode the Managed Entity specific protocol struct and an
+// OMCILayer struct. This struct can be provided to the gopacket.SerializeLayers()
+// function to be serialized into a buffer for transmission.
+func EncodeFrame(m *me.ManagedEntity, messageType MessageType, opt ...FrameOption) (*OMCI, gopacket.SerializableLayer, error) {
+	// Check for message type support
+	msgType := me.MsgType(messageType & me.MsgTypeMask)
+	meDefinition := m.GetManagedEntityDefinition()
+
+	if !me.SupportsMsgType(meDefinition, msgType) {
+		msg := fmt.Sprintf("managed entity %v does not support %v Message-Type",
+			meDefinition.GetName(), msgType)
+		return nil, nil, errors.New(msg)
+	}
+	// Decode options
+	opts := defaultFrameOptions
+	for _, o := range opt {
+		o(&opts)
+	}
+	// TODO: If AttributesMask option passed in, check for deprecated options. Allow encoding option
+	//       that will ignore deprecated options.   Add additional tests in the get and set meframe_test.go
+	//       test functions to test this. Also have it test attribute name(s) to see if the attribute
+	//       is deprecated.  The OMCI-Parser will need to decode deprecated for us...
+	// Note: Transaction ID should be set before frame serialization
+	omci := &OMCI{
+		TransactionID:    opts.transactionID,
+		MessageType:      messageType,
+		DeviceIdentifier: opts.frameFormat,
+	}
+	var meInfo gopacket.SerializableLayer
+	var err error
+
+	if encoder, ok := encoderMap[messageType]; ok {
+		meInfo, err = encoder(m, opts)
+	} else {
+		err = fmt.Errorf("message-type: %v/%#x is not supported", messageType, messageType)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+	return omci, meInfo, err
+}
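+
+// A minimal encode-and-serialize sketch (hedged; meInstance is an assumed
+// *me.ManagedEntity created via the generated 'New' constructors, and GenFrame
+// below wraps these same steps for tests):
+//
+//	omciLayer, msgLayer, err := EncodeFrame(meInstance, SetRequestType,
+//		TransactionID(1), AttributeMask(0x8000))
+//	if err == nil {
+//		buffer := gopacket.NewSerializeBuffer()
+//		err = gopacket.SerializeLayers(buffer,
+//			gopacket.SerializeOptions{FixLengths: true}, omciLayer, msgLayer)
+//	}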
+
+// For most of the create methods below, error checking for valid masks, attribute
+// values, and other fields is left to when the frame is actually serialized.
+
+func checkAttributeMask(m *me.ManagedEntity, mask uint16) (uint16, error) {
+	if mask == defaultFrameOptions.attributeMask {
+		// Scale back to just what is allowed
+		return m.GetAllowedAttributeMask(), nil
+	}
+	if mask&m.GetManagedEntityDefinition().GetAllowedAttributeMask() != mask {
+		return 0, errors.New("invalid attribute mask")
+	}
+	return mask & m.GetManagedEntityDefinition().GetAllowedAttributeMask(), nil
+}
+
+// return the maximum space that can be used by attributes
+func maxPacketAvailable(m *me.ManagedEntity, opt options) uint {
+	if opt.frameFormat == BaselineIdent {
+		// OMCI Header          - 4 octets
+		// Class ID/Instance ID - 4 octets
+		// Length field			- 4 octets
+		// MIC                  - 4 octets
+		return MaxBaselineLength - 16
+	}
+	// OMCI Header          - 4 octets
+	// Class ID/Instance ID - 4 octets
+	// Length field			- 4 octets
+	// MIC                  - 4 octets
+	return MaxExtendedLength - 16
+}
+
+func calculateAttributeMask(m *me.ManagedEntity, requestedMask uint16) (uint16, error) {
+	attrDefs := m.GetAttributeDefinitions()
+	var entityIdName string
+	if entry, ok := (*attrDefs)[0]; ok {
+		entityIdName = entry.GetName()
+	} else {
+		panic("unexpected error") // All attribute definition maps have an entity ID
+	}
+	attributeNames := make([]interface{}, 0)
+	for attrName := range *m.GetAttributeValueMap() {
+		if attrName == entityIdName {
+			continue // No mask for EntityID
+		}
+		attributeNames = append(attributeNames, attrName)
+	}
+	calculatedMask, err := me.GetAttributeBitmap(*attrDefs, mapset.NewSetWith(attributeNames...))
+
+	if err != nil {
+		return 0, err
+	}
+	return calculatedMask & requestedMask, nil
+}
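+
+// Worked example of the attribute-mask bit numbering used throughout this file:
+// attribute index 1 (the first attribute after the Entity ID) occupies the most
+// significant bit, so index 1 maps to 1<<(16-1) == 0x8000 and index 16 maps to
+// 1<<(16-16) == 0x0001, matching the 'mask&(1<<(16-attrIndex))' tests in the
+// encoders below.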
+
+// GenFrame is a helper function to make tests a little easier to read.
+// For a real application, use the .../omci/generated/class.go 'New'
+// functions to create your Managed Entity and then use it to call the
+// EncodeFrame method.
+func GenFrame(meInstance *me.ManagedEntity, messageType MessageType, options ...FrameOption) ([]byte, error) {
+	omciLayer, msgLayer, err := EncodeFrame(meInstance, messageType, options...)
+	if err != nil {
+		return nil, err
+	}
+	// Serialize the frame and send it
+	var serializeOptions gopacket.SerializeOptions
+	serializeOptions.FixLengths = true
+
+	buffer := gopacket.NewSerializeBuffer()
+	err = gopacket.SerializeLayers(buffer, serializeOptions, omciLayer, msgLayer)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+func CreateRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// NOTE: The OMCI parser does not extract the default values of set-by-create attributes,
+	//       so they are the zero 'default' (or nil) at this time.  For this reason, make sure
+	//       you specify all non-zero default values and pass them in appropriately.
+	meLayer := &CreateRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Attributes: *m.GetAttributeValueMap(),
+	}
+	return meLayer, nil
+}
+
+func CreateResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	meLayer := &CreateResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result:                 opt.result,
+		AttributeExecutionMask: opt.attributeMask,
+	}
+	if meLayer.Result == me.ParameterError {
+		meLayer.AttributeExecutionMask = opt.attrExecutionMask
+	}
+	return meLayer, nil
+}
+
+func DeleteRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	meLayer := &DeleteRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	return meLayer, nil
+}
+
+func DeleteResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	meLayer := &DeleteResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result: opt.result,
+	}
+	return meLayer, nil
+}
+
+func SetRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	mask, err = calculateAttributeMask(m, mask)
+	if err != nil {
+		return nil, err
+	}
+	meDefinition := m.GetManagedEntityDefinition()
+	attrDefs := *meDefinition.GetAttributeDefinitions()
+	attrMap := *m.GetAttributeValueMap()
+
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+	payloadAvailable := int(maxPayload) - 2 // Less attribute mask
+
+	meLayer := &SetRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AttributeMask: 0,
+		Attributes:    make(me.AttributeValueMap),
+	}
+	for mask != 0 {
+		// Iterate down the attributes (Attribute 0 is the ManagedEntity ID)
+		var attrIndex uint
+		for attrIndex = 1; attrIndex <= 16; attrIndex++ {
+			// Is this attribute requested
+			if mask&(1<<(16-attrIndex)) != 0 {
+				// Get definitions since we need the name
+				attrDef, ok := attrDefs[attrIndex]
+				if !ok {
+					msg := fmt.Sprintf("Unexpected error, index %v not valued for ME %v",
+						attrIndex, meDefinition.GetName())
+					return nil, errors.New(msg)
+				}
+				var attrValue interface{}
+				attrValue, ok = attrMap[attrDef.Name]
+				if !ok {
+					msg := fmt.Sprintf("Unexpected error, attribute %v not provided in ME %v: %v",
+						attrDef.GetName(), meDefinition.GetName(), m)
+					return nil, errors.New(msg)
+				}
+				// Is space available?
+				if attrDef.Size <= payloadAvailable {
+					// Mark bit handled
+					mask &= ^(1 << (16 - attrIndex))
+					meLayer.AttributeMask |= 1 << (16 - attrIndex)
+					meLayer.Attributes[attrDef.Name] = attrValue
+					payloadAvailable -= attrDef.Size
+				} else {
+					// TODO: Should we set truncate?
+					msg := fmt.Sprintf("out-of-space. Cannot fit attribute %v into SetRequest message",
+						attrDef.GetName())
+					return nil, me.NewMessageTruncatedError(msg)
+				}
+			}
+		}
+	}
+	if err == nil && meLayer.AttributeMask == 0 {
+		// TODO: Is a set request with no attributes valid?
+		return nil, errors.New("no attributes encoded for SetRequest")
+	}
+	return meLayer, nil
+}
+
+func SetResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	meLayer := &SetResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result: opt.result,
+	}
+	if meLayer.Result == me.AttributeFailure {
+		meLayer.UnsupportedAttributeMask = opt.unsupportedMask
+		meLayer.FailedAttributeMask = opt.attrExecutionMask
+	}
+	return meLayer, nil
+}
+
+func GetRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Given the mask sent in (which could be the default of 0xFFFF), get what is allowable.
+	// This will be all allowed attributes if 0xFFFF is passed in, or the requested
+	// subset otherwise.
+	maxMask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Now scan attributes and reduce mask to only those requested
+	var mask uint16
+	mask, err = calculateAttributeMask(m, maxMask)
+	if err != nil {
+		return nil, err
+	}
+	if mask == 0 {
+		// TODO: Is a Get request with no attributes valid?
+		return nil, errors.New("no attributes requested for GetRequest")
+	}
+	meLayer := &GetRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AttributeMask: mask,
+	}
+	return meLayer, nil
+}
+
+func GetResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	mask, err = calculateAttributeMask(m, mask)
+	if err != nil {
+		return nil, err
+	}
+	meLayer := &GetResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result:        opt.result,
+		AttributeMask: 0,
+		Attributes:    make(me.AttributeValueMap),
+	}
+	if meLayer.Result == me.AttributeFailure {
+		meLayer.UnsupportedAttributeMask = opt.unsupportedMask
+		meLayer.FailedAttributeMask = opt.attrExecutionMask
+	}
+	// Encode whatever we can
+	if meLayer.Result == me.Success || meLayer.Result == me.AttributeFailure {
+		// Encode results
+		// Get payload space available
+		maxPayload := maxPacketAvailable(m, opt)
+		payloadAvailable := int(maxPayload) - 2 - 4 // Less attribute mask and attribute error encoding
+		meDefinition := m.GetManagedEntityDefinition()
+		attrDefs := *meDefinition.GetAttributeDefinitions()
+		attrMap := *m.GetAttributeValueMap()
+
+		if mask != 0 {
+			// Iterate down the attributes (Attribute 0 is the ManagedEntity ID)
+			var attrIndex uint
+			for attrIndex = 1; attrIndex <= 16; attrIndex++ {
+				// Is this attribute requested
+				if mask&(1<<(16-attrIndex)) != 0 {
+					// Get definitions since we need the name
+					attrDef, ok := attrDefs[attrIndex]
+					if !ok {
+						msg := fmt.Sprintf("Unexpected error, index %v not valued for ME %v",
+							attrIndex, meDefinition.GetName())
+						return nil, errors.New(msg)
+					}
+					var attrValue interface{}
+					attrValue, ok = attrMap[attrDef.Name]
+					if !ok {
+						msg := fmt.Sprintf("Unexpected error, attribute %v not provided in ME %v: %v",
+							attrDef.GetName(), meDefinition.GetName(), m)
+						return nil, errors.New(msg)
+					}
+					// Is space available?
+					if attrDef.Size <= payloadAvailable {
+						// Mark bit handled
+						mask &= ^(1 << (16 - attrIndex))
+						meLayer.AttributeMask |= 1 << (16 - attrIndex)
+						meLayer.Attributes[attrDef.Name] = attrValue
+						payloadAvailable -= attrDef.Size
+
+					} else if opt.failIfTruncated {
+						// TODO: Should we set truncate?
+						msg := fmt.Sprintf("out-of-space. Cannot fit attribute %v into GetResponse message",
+							attrDef.GetName())
+						return nil, me.NewMessageTruncatedError(msg)
+					} else {
+						// Add to existing 'failed' mask and update result
+						meLayer.FailedAttributeMask |= 1 << (16 - attrIndex)
+						meLayer.Result = me.AttributeFailure
+					}
+				}
+			}
+		}
+	}
+	return meLayer, nil
+}
+
+func GetAllAlarmsRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &GetAllAlarmsRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AlarmRetrievalMode: opt.mode,
+	}
+	return meLayer, nil
+}
+
+func GetAllAlarmsResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &GetAllAlarmsResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		NumberOfCommands: opt.sequenceNumberCountOrSize,
+	}
+	return meLayer, nil
+}
+
+func GetAllAlarmsNextRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &GetAllAlarmsNextRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		CommandSequenceNumber: opt.sequenceNumberCountOrSize,
+	}
+	return meLayer, nil
+}
+
+func GetAllAlarmsNextResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &GetAllAlarmsNextResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AlarmEntityClass:    opt.alarm.AlarmClassId,
+		AlarmEntityInstance: opt.alarm.AlarmInstance,
+	}
+	if len(opt.alarm.AlarmBitmap) > 28 {
+		return nil, errors.New("invalid Alarm Bitmap Size. Must be [0..27]")
+	}
+	for octet := 0; octet < len(opt.alarm.AlarmBitmap); octet++ {
+		meLayer.AlarmBitMap[octet] = opt.alarm.AlarmBitmap[octet]
+	}
+	for octet := len(opt.alarm.AlarmBitmap); octet < 28; octet++ {
+		meLayer.AlarmBitMap[octet] = 0
+	}
+	return meLayer, nil
+}
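+
+// Hedged usage sketch for the alarm options consumed above (the class/instance and
+// bitmap values are illustrative; meInstance is assumed to be an ME, typically ONU
+// Data, that supports the GetAllAlarmsNext message type):
+//
+//	omciLayer, msgLayer, err := EncodeFrame(meInstance, GetAllAlarmsNextResponseType,
+//		TransactionID(tid), Alarm(AlarmOptions{
+//			AlarmClassId:  11,
+//			AlarmInstance: 0x0101,
+//			AlarmBitmap:   []byte{0x80}, // first alarm bit set; zero-padded to 28 octets
+//		}))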
+
+func MibUploadRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibUploadRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: 0,
+		},
+	}
+	return meLayer, nil
+}
+
+func MibUploadResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibUploadResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: 0,
+		},
+		NumberOfCommands: opt.sequenceNumberCountOrSize,
+	}
+	return meLayer, nil
+}
+
+func MibUploadNextRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibUploadNextRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: 0,
+		},
+		CommandSequenceNumber: opt.sequenceNumberCountOrSize,
+	}
+	return meLayer, nil
+}
+
+func MibUploadNextResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibUploadNextResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	if opt.payload == nil {
+		// Shortcut used to specify that the request sequence number is out of range; encode
+		// an ME instance with a class ID of zero to indicate this, per ITU G.988
+		meDef := &me.ManagedEntityDefinition{
+			Name:                 "InvalidSequenceNumberManagedEntity",
+			ClassID:              me.ClassID(0),
+			MessageTypes:         nil,
+			AttributeDefinitions: make(me.AttributeDefinitionMap),
+		}
+		opt.payload, _ = me.NewManagedEntity(meDef)
+	}
+	if _, ok := opt.payload.(*[]me.ManagedEntity); ok {
+		if opt.frameFormat == BaselineIdent {
+			return nil, errors.New("invalid payload for Baseline message")
+		}
+		// TODO: A list of MEs is valid for extended messages only
+	} else if managedEntity, ok := opt.payload.(*me.ManagedEntity); ok {
+		// Single ME
+		meLayer.ReportedME = *managedEntity
+	} else {
+		return nil, errors.New("invalid payload for MibUploadNextResponse frame")
+	}
+	return meLayer, nil
+}
+
+func MibResetRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibResetRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	return meLayer, nil
+}
+
+func MibResetResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &MibResetResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result: opt.result,
+	}
+	return meLayer, nil
+}
+
+func AlarmNotificationFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &AlarmNotificationMsg{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+	payloadAvailable := int(maxPayload) - 1 // Less alarm sequence number
+
+	// TODO: Lots of work to do
+	fmt.Println(mask, maxPayload, payloadAvailable)
+
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func AttributeValueChangeFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &AttributeValueChangeMsg{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AttributeMask: 0,
+		Attributes:    make(me.AttributeValueMap),
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+	payloadAvailable := int(maxPayload) - 2 // Less attribute mask
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload, payloadAvailable)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func TestRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &TestRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func TestResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &TestResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func StartSoftwareDownloadRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &StartSoftwareDownloadRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		WindowSize:           opt.software.WindowSize,
+		ImageSize:            opt.software.ImageSize,
+		NumberOfCircuitPacks: byte(len(opt.software.CircuitPacks)),
+		CircuitPacks:         opt.software.CircuitPacks,
+	}
+	// TODO: Add length check to ensure we do not exceed maximum packet size
+	// payloadAvailable := int(maxPacketAvailable(m, opt))
+	payloadAvailable := 2
+	sizeNeeded := 1
+	if sizeNeeded > payloadAvailable {
+		// TODO: Should we set truncate?
+		msg := "out-of-space. Cannot fit Circuit Pack instances into Start Software Download Request message"
+		return nil, me.NewMessageTruncatedError(msg)
+	}
+	return meLayer, nil
+}
+
+func StartSoftwareDownloadResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &StartSoftwareDownloadResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		WindowSize:        opt.software.WindowSize,
+		NumberOfInstances: byte(len(opt.software.CircuitPacks)),
+		MeResults:         opt.software.Results,
+	}
+	// TODO: Add length check to ensure we do not exceed maximum packet size
+	// payloadAvailable := int(maxPacketAvailable(m, opt))
+	payloadAvailable := 2
+	sizeNeeded := 1
+	if sizeNeeded > payloadAvailable {
+		// TODO: Should we set truncate?
+		msg := "out-of-space. Cannot fit Results  into Start Software Download Response message"
+		return nil, me.NewMessageTruncatedError(msg)
+	}
+	return meLayer, nil
+}
+
+func DownloadSectionRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &DownloadSectionRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func DownloadSectionResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &DownloadSectionResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func EndSoftwareDownloadRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &EndSoftwareDownloadRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func EndSoftwareDownloadResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &EndSoftwareDownloadResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func ActivateSoftwareRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &ActivateSoftwareRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func ActivateSoftwareResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &ActivateSoftwareResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func CommitSoftwareRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &CommitSoftwareRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func CommitSoftwareResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &CommitSoftwareResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func SynchronizeTimeRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &SynchronizeTimeRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Decode payload option. If nil, no timestamp provided
+	if timestamp, ok := opt.payload.(int64); ok {
+		tm := time.Unix(timestamp, 0)
+		meLayer.Year = uint16(tm.UTC().Year())
+		meLayer.Month = uint8(tm.UTC().Month())
+		meLayer.Day = uint8(tm.UTC().Day())
+		meLayer.Hour = uint8(tm.UTC().Hour())
+		meLayer.Minute = uint8(tm.UTC().Minute())
+		meLayer.Second = uint8(tm.UTC().Second())
+	}
+	return meLayer, nil
+}
+
+func SynchronizeTimeResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &SynchronizeTimeResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result:         opt.result,
+		SuccessResults: opt.mode,
+	}
+	return meLayer, nil
+}
+
+func RebootRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &RebootRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		RebootCondition: opt.mode,
+	}
+	return meLayer, nil
+}
+
+func RebootResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Common for all MEs
+	meLayer := &RebootResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result: opt.result,
+	}
+	return meLayer, nil
+}
+
+func GetNextRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Validate attribute mask
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Now scan attributes and reduce mask to only those requested
+	mask, err = calculateAttributeMask(m, mask)
+	if err != nil {
+		return nil, err
+	}
+	if mask == 0 {
+		return nil, errors.New("no attributes requested for GetNextRequest")
+	}
+	// TODO: If more than one attribute or the attribute requested is not a table attribute, return an error
+	// Common for all MEs
+	meLayer := &GetNextRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		AttributeMask:  mask,
+		SequenceNumber: opt.sequenceNumberCountOrSize,
+	}
+	return meLayer, nil
+}
+
+func GetNextResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	// Validate attribute mask
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	mask, err = calculateAttributeMask(m, mask)
+	if err != nil {
+		return nil, err
+	}
+	//
+	// Common for all MEs
+	meLayer := &GetNextResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+		Result:        opt.result,
+		AttributeMask: 0,
+		Attributes:    make(me.AttributeValueMap),
+	}
+	if meLayer.Result == me.Success {
+		// Get payload space available
+		maxPayload := maxPacketAvailable(m, opt)
+		payloadAvailable := int(maxPayload) - 3 // Less results and attribute mask
+		meDefinition := m.GetManagedEntityDefinition()
+		attrDefs := *meDefinition.GetAttributeDefinitions()
+		attrMap := *m.GetAttributeValueMap()
+
+		if mask == 0 {
+			return nil, errors.New("no attributes requested for GetNextResponse")
+		}
+		// TODO: If more than one attribute or the attribute requested is not a table attribute, return an error
+		// Iterate down the attributes (Attribute 0 is the ManagedEntity ID)
+		var attrIndex uint
+		for attrIndex = 1; attrIndex <= 16; attrIndex++ {
+			// Is this attribute requested
+			if mask&(1<<(16-attrIndex)) != 0 {
+				// Get definitions since we need the name
+				attrDef, ok := attrDefs[attrIndex]
+				if !ok {
+					msg := fmt.Sprintf("Unexpected error, index %v not valued for ME %v",
+						attrIndex, meDefinition.GetName())
+					return nil, errors.New(msg)
+				}
+				var attrValue interface{}
+				attrValue, ok = attrMap[attrDef.Name]
+				if !ok || attrValue == nil {
+					msg := fmt.Sprintf("Unexpected error, attribute %v not provided in ME %v: %v",
+						attrDef.GetName(), meDefinition.GetName(), m)
+					return nil, errors.New(msg)
+				}
+				// Is space available?
+				if attrDef.Size <= payloadAvailable {
+					// Mark bit handled
+					mask &= ^(1 << (16 - attrIndex))
+					meLayer.AttributeMask |= 1 << (16 - attrIndex)
+					meLayer.Attributes[attrDef.Name] = attrValue
+					payloadAvailable -= attrDef.Size
+				} else {
+					// TODO: Should we set truncate?
+					msg := fmt.Sprintf("out-of-space. Cannot fit attribute %v into GetNextResponse message",
+						attrDef.GetName())
+					return nil, me.NewMessageTruncatedError(msg)
+				}
+			}
+		}
+	}
+	return meLayer, nil
+}
+
+func TestResultFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &TestResultMsg{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func GetCurrentDataRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &GetCurrentDataRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func GetCurrentDataResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &GetCurrentDataResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func SetTableRequestFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	if opt.frameFormat != ExtendedIdent {
+		return nil, errors.New("SetTable message type only supported with Extended OMCI Messaging")
+	}
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &SetTableRequest{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
+
+func SetTableResponseFrame(m *me.ManagedEntity, opt options) (gopacket.SerializableLayer, error) {
+	if opt.frameFormat != ExtendedIdent {
+		return nil, errors.New("SetTable message type only supported with Extended OMCI Messaging")
+	}
+	mask, err := checkAttributeMask(m, opt.attributeMask)
+	if err != nil {
+		return nil, err
+	}
+	// Common for all MEs
+	meLayer := &SetTableResponse{
+		MeBasePacket: MeBasePacket{
+			EntityClass:    m.GetClassID(),
+			EntityInstance: m.GetEntityID(),
+		},
+	}
+	// Get payload space available
+	maxPayload := maxPacketAvailable(m, opt)
+
+	// TODO: Lots of work to do
+
+	fmt.Println(mask, maxPayload)
+	return meLayer, errors.New("todo: Not implemented")
+}
diff --git a/vendor/github.com/cboling/omci/messagetypes.go b/vendor/github.com/cboling/omci/messagetypes.go
new file mode 100644
index 0000000..8ac4dde
--- /dev/null
+++ b/vendor/github.com/cboling/omci/messagetypes.go
@@ -0,0 +1,3472 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package omci
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	me "github.com/cboling/omci/generated"
+	"github.com/google/gopacket"
+)
+
+// MessageType is the OMCI Message Type or'ed with the AR/AK flags as appropriate.
+type MessageType byte
+
+const (
+	CreateRequestType                 = MessageType(byte(me.Create) | me.AR)
+	CreateResponseType                = MessageType(byte(me.Create) | me.AK)
+	DeleteRequestType                 = MessageType(byte(me.Delete) | me.AR)
+	DeleteResponseType                = MessageType(byte(me.Delete) | me.AK)
+	SetRequestType                    = MessageType(byte(me.Set) | me.AR)
+	SetResponseType                   = MessageType(byte(me.Set) | me.AK)
+	GetRequestType                    = MessageType(byte(me.Get) | me.AR)
+	GetResponseType                   = MessageType(byte(me.Get) | me.AK)
+	GetAllAlarmsRequestType           = MessageType(byte(me.GetAllAlarms) | me.AR)
+	GetAllAlarmsResponseType          = MessageType(byte(me.GetAllAlarms) | me.AK)
+	GetAllAlarmsNextRequestType       = MessageType(byte(me.GetAllAlarmsNext) | me.AR)
+	GetAllAlarmsNextResponseType      = MessageType(byte(me.GetAllAlarmsNext) | me.AK)
+	MibUploadRequestType              = MessageType(byte(me.MibUpload) | me.AR)
+	MibUploadResponseType             = MessageType(byte(me.MibUpload) | me.AK)
+	MibUploadNextRequestType          = MessageType(byte(me.MibUploadNext) | me.AR)
+	MibUploadNextResponseType         = MessageType(byte(me.MibUploadNext) | me.AK)
+	MibResetRequestType               = MessageType(byte(me.MibReset) | me.AR)
+	MibResetResponseType              = MessageType(byte(me.MibReset) | me.AK)
+	TestRequestType                   = MessageType(byte(me.Test) | me.AR)
+	TestResponseType                  = MessageType(byte(me.Test) | me.AK)
+	StartSoftwareDownloadRequestType  = MessageType(byte(me.StartSoftwareDownload) | me.AR)
+	StartSoftwareDownloadResponseType = MessageType(byte(me.StartSoftwareDownload) | me.AK)
+	DownloadSectionRequestType        = MessageType(byte(me.DownloadSection) | me.AR)
+	DownloadSectionResponseType       = MessageType(byte(me.DownloadSection) | me.AK)
+	EndSoftwareDownloadRequestType    = MessageType(byte(me.EndSoftwareDownload) | me.AR)
+	EndSoftwareDownloadResponseType   = MessageType(byte(me.EndSoftwareDownload) | me.AK)
+	ActivateSoftwareRequestType       = MessageType(byte(me.ActivateSoftware) | me.AR)
+	ActivateSoftwareResponseType      = MessageType(byte(me.ActivateSoftware) | me.AK)
+	CommitSoftwareRequestType         = MessageType(byte(me.CommitSoftware) | me.AR)
+	CommitSoftwareResponseType        = MessageType(byte(me.CommitSoftware) | me.AK)
+	SynchronizeTimeRequestType        = MessageType(byte(me.SynchronizeTime) | me.AR)
+	SynchronizeTimeResponseType       = MessageType(byte(me.SynchronizeTime) | me.AK)
+	RebootRequestType                 = MessageType(byte(me.Reboot) | me.AR)
+	RebootResponseType                = MessageType(byte(me.Reboot) | me.AK)
+	GetNextRequestType                = MessageType(byte(me.GetNext) | me.AR)
+	GetNextResponseType               = MessageType(byte(me.GetNext) | me.AK)
+	GetCurrentDataRequestType         = MessageType(byte(me.GetCurrentData) | me.AR)
+	GetCurrentDataResponseType        = MessageType(byte(me.GetCurrentData) | me.AK)
+	SetTableRequestType               = MessageType(byte(me.SetTable) | me.AR)
+	SetTableResponseType              = MessageType(byte(me.SetTable) | me.AK)
+	// Autonomous ONU messages
+	AlarmNotificationType    = MessageType(byte(me.AlarmNotification))
+	AttributeValueChangeType = MessageType(byte(me.AttributeValueChange))
+	TestResultType           = MessageType(byte(me.TestResult))
+)
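+
+// Worked example of the encoding above, assuming the G.988 message-type octet layout
+// in which the AR (acknowledge request) bit is 0x40, the AK (acknowledgement) bit is
+// 0x20, and me.Create is 0x04 (the actual constants live in the generated package):
+//
+//	CreateRequestType  == 0x04 | 0x40 == 0x44
+//	CreateResponseType == 0x04 | 0x20 == 0x24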
+
+func (mt MessageType) String() string {
+	switch mt {
+	default:
+		return "Unknown"
+
+	case CreateRequestType:
+		return "Create Request"
+	case CreateResponseType:
+		return "Create Response"
+	case DeleteRequestType:
+		return "Delete Request"
+	case DeleteResponseType:
+		return "Delete Response"
+	case SetRequestType:
+		return "Set Request"
+	case SetResponseType:
+		return "Set Response"
+	case GetRequestType:
+		return "Get Request"
+	case GetResponseType:
+		return "Get Response"
+	case GetAllAlarmsRequestType:
+		return "Get All Alarms Request"
+	case GetAllAlarmsResponseType:
+		return "Get All Alarms Response"
+	case GetAllAlarmsNextRequestType:
+		return "Get All Alarms Next Request"
+	case GetAllAlarmsNextResponseType:
+		return "Get All Alarms Next Response"
+	case MibUploadRequestType:
+		return "MIB Upload Request"
+	case MibUploadResponseType:
+		return "MIB Upload Response"
+	case MibUploadNextRequestType:
+		return "MIB Upload Next Request"
+	case MibUploadNextResponseType:
+		return "MIB Upload Next Response"
+	case MibResetRequestType:
+		return "MIB Reset Request"
+	case MibResetResponseType:
+		return "MIB Reset Response"
+	case TestRequestType:
+		return "Test Request"
+	case TestResponseType:
+		return "Test Response"
+	case StartSoftwareDownloadRequestType:
+		return "Start Software Download Request"
+	case StartSoftwareDownloadResponseType:
+		return "Start Software Download Response"
+	case DownloadSectionRequestType:
+		return "Download Section Request"
+	case DownloadSectionResponseType:
+		return "Download Section Response"
+	case EndSoftwareDownloadRequestType:
+		return "End Software Download Request"
+	case EndSoftwareDownloadResponseType:
+		return "End Software Download Response"
+	case ActivateSoftwareRequestType:
+		return "Activate Software Request"
+	case ActivateSoftwareResponseType:
+		return "Activate Software Response"
+	case CommitSoftwareRequestType:
+		return "Commit Software Request"
+	case CommitSoftwareResponseType:
+		return "Commit Software Response"
+	case SynchronizeTimeRequestType:
+		return "Synchronize Time Request"
+	case SynchronizeTimeResponseType:
+		return "Synchronize Time Response"
+	case RebootRequestType:
+		return "Reboot Request"
+	case RebootResponseType:
+		return "Reboot Response"
+	case GetNextRequestType:
+		return "Get Next Request"
+	case GetNextResponseType:
+		return "Get Next Response"
+	case GetCurrentDataRequestType:
+		return "Get Current Data Request"
+	case GetCurrentDataResponseType:
+		return "Get Current Data Response"
+	case SetTableRequestType:
+		return "Set Table Request"
+	case SetTableResponseType:
+		return "Set Table Response"
+	case AlarmNotificationType:
+		return "Alarm Notification"
+	case AttributeValueChangeType:
+		return "Attribute Value Change"
+	case TestResultType:
+		return "Test Result"
+	}
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// CreateRequest
+type CreateRequest struct {
+	MeBasePacket
+	Attributes me.AttributeValueMap
+}
+
+func (omci *CreateRequest) String() string {
+	return fmt.Sprintf("%v, attributes: %v", omci.MeBasePacket.String(), omci.Attributes)
+}
+
+func (omci *CreateRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	// Create attribute mask for all set-by-create entries
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Create
+	if !me.SupportsMsgType(meDefinition, me.Create) {
+		return me.NewProcessingError("managed entity does not support Create Message-Type")
+	}
+	var sbcMask uint16
+	for index, attr := range *meDefinition.GetAttributeDefinitions() {
+		if me.SupportsAttributeAccess(attr, me.SetByCreate) {
+			if index == 0 {
+				continue // Skip Entity ID
+			}
+			sbcMask |= 1 << (15 - uint(index-1))
+		}
+	}
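+	// Illustrative example of the mask arithmetic above: attribute index 1 maps to
+	// bit 15 (0x8000) and index 16 to bit 0 (0x0001), so an ME whose first two
+	// attributes (indices 1 and 2) are set-by-create yields sbcMask == 0xC000.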
+	// Attribute decode
+	omci.Attributes, err = meDefinition.DecodeAttributes(sbcMask, data[4:], p, byte(CreateRequestType))
+	if err != nil {
+		return err
+	}
+	if eidDef, eidDefOK := (*meDefinition.GetAttributeDefinitions())[0]; eidDefOK {
+		omci.Attributes[eidDef.GetName()] = omci.EntityInstance
+		return nil
+	}
+	panic("All Managed Entities have an EntityID attribute")
+}
+
+func decodeCreateRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &CreateRequest{}
+	omci.MsgLayerType = LayerTypeCreateRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *CreateRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	var sbcMask uint16
+	for index, attr := range *meDefinition.GetAttributeDefinitions() {
+		if me.SupportsAttributeAccess(attr, me.SetByCreate) {
+			if index == 0 {
+				continue // Skip Entity ID
+			}
+			sbcMask |= 1 << (15 - uint(index-1))
+		}
+	}
+	// Attribute serialization
+	// TODO: Only Baseline supported at this time
+	bytesAvailable := MaxBaselineLength - 8 - 8
+	return meDefinition.SerializeAttributes(omci.Attributes, sbcMask, b, byte(CreateRequestType), bytesAvailable)
+}
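+
+// A minimal usage sketch (not part of the original source; field placement inside
+// MeBasePacket is assumed from its use in the decoders above):
+//
+//	request := &CreateRequest{
+//		MeBasePacket: MeBasePacket{EntityClass: classID, EntityInstance: 0},
+//		Attributes:   me.AttributeValueMap{ /* set-by-create attribute values */ },
+//	}
+//	buffer := gopacket.NewSerializeBuffer()
+//	err := request.SerializeTo(buffer, gopacket.SerializeOptions{})
+//
+// where classID is a hypothetical managed entity class chosen by the caller and the
+// resulting buffer holds only the message-contents layer produced by SerializeTo.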
+
+/////////////////////////////////////////////////////////////////////////////
+// CreateResponse
+type CreateResponse struct {
+	MeBasePacket
+	Result                 me.Results
+	AttributeExecutionMask uint16 // Used when Result == ParameterError
+}
+
+func (omci *CreateResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Mask: %#x",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.AttributeExecutionMask)
+}
+
+func (omci *CreateResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Create
+	if !me.SupportsMsgType(entity, me.Create) {
+		return me.NewProcessingError("managed entity does not support the Create Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result == me.ParameterError {
+		omci.AttributeExecutionMask = binary.BigEndian.Uint16(data[5:])
+		// TODO: validation that attributes set in mask are SetByCreate would be good here
+	}
+	return nil
+}
+
+func decodeCreateResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &CreateResponse{}
+	omci.MsgLayerType = LayerTypeCreateResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *CreateResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Create
+	if !me.SupportsMsgType(entity, me.Create) {
+		return me.NewProcessingError("managed entity does not support the Create Message-Type")
+	}
+	bytes, err := b.AppendBytes(3)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	if omci.Result == me.ParameterError {
+		// TODO: validation that attributes set in mask are SetByCreate would be good here
+		binary.BigEndian.PutUint16(bytes[1:], omci.AttributeExecutionMask)
+	} else {
+		binary.BigEndian.PutUint16(bytes[1:], 0)
+	}
+	return nil
+}
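+
+// For reference, the three content octets appended above are the result code
+// followed by the 16-bit attribute execution mask, which is non-zero only when the
+// result is Parameter Error.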
+
+/////////////////////////////////////////////////////////////////////////////
+// DeleteRequest
+type DeleteRequest struct {
+	MeBasePacket
+}
+
+func (omci *DeleteRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *DeleteRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Delete
+	if !me.SupportsMsgType(entity, me.Delete) {
+		return me.NewProcessingError("managed entity does not support the Delete Message-Type")
+	}
+	return nil
+}
+
+func decodeDeleteRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &DeleteRequest{}
+	omci.MsgLayerType = LayerTypeDeleteRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *DeleteRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Delete
+	if !me.SupportsMsgType(entity, me.Delete) {
+		return me.NewProcessingError("managed entity does not support the Delete Message-Type")
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// DeleteResponse
+type DeleteResponse struct {
+	MeBasePacket
+	Result me.Results
+}
+
+func (omci *DeleteResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v)",
+		omci.MeBasePacket.String(), omci.Result, omci.Result)
+}
+
+func (omci *DeleteResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Delete
+	if !me.SupportsMsgType(entity, me.Delete) {
+		return me.NewProcessingError("managed entity does not support the Delete Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+	return nil
+}
+
+func decodeDeleteResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &DeleteResponse{}
+	omci.MsgLayerType = LayerTypeDeleteResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *DeleteResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Delete
+	if !me.SupportsMsgType(entity, me.Delete) {
+		return me.NewProcessingError("managed entity does not support the Delete Message-Type")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// SetRequest
+type SetRequest struct {
+	MeBasePacket
+	AttributeMask uint16
+	Attributes    me.AttributeValueMap
+}
+
+func (omci *SetRequest) String() string {
+	return fmt.Sprintf("%v, Mask: %#x, attributes: %v",
+		omci.MeBasePacket.String(), omci.AttributeMask, omci.Attributes)
+}
+
+func (omci *SetRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Set
+	if !me.SupportsMsgType(meDefinition, me.Set) {
+		return me.NewProcessingError("managed entity does not support Set Message-Type")
+	}
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+
+	// Attribute decode
+	omci.Attributes, err = meDefinition.DecodeAttributes(omci.AttributeMask, data[6:], p, byte(SetRequestType))
+	if err != nil {
+		return err
+	}
+	// Validate all attributes support write
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Write) {
+			msg := fmt.Sprintf("attribute '%v' does not support write access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	if eidDef, eidDefOK := (*meDefinition.GetAttributeDefinitions())[0]; eidDefOK {
+		omci.Attributes[eidDef.GetName()] = omci.EntityInstance
+		return nil
+	}
+	panic("All Managed Entities have an EntityID attribute")
+}
+
+func decodeSetRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SetRequest{}
+	omci.MsgLayerType = LayerTypeSetRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SetRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Set
+	if !me.SupportsMsgType(meDefinition, me.Set) {
+		return me.NewProcessingError("managed entity does not support Set Message-Type")
+	}
+	// Validate all attributes support write
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		// Do not test for write of Entity ID in the attribute list
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Write) {
+			// TODO: Check ITU spec to see if this should be listed as a failed
+			//       attribute and not a processing error.
+			msg := fmt.Sprintf("attribute '%v' does not support write access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.AttributeMask)
+
+	// Attribute serialization
+	// TODO: Only Baseline supported at this time
+	bytesAvailable := MaxBaselineLength - 10 - 8
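+	// Illustrative arithmetic, assuming MaxBaselineLength is the 48-octet baseline
+	// frame: 48 - 10 (common header plus 2-octet attribute mask) - 8 (trailer)
+	// leaves 30 octets for serialized attribute values.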
+
+	return meDefinition.SerializeAttributes(omci.Attributes, omci.AttributeMask, b,
+		byte(SetRequestType), bytesAvailable)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// SetResponse
+type SetResponse struct {
+	MeBasePacket
+	Result                   me.Results
+	UnsupportedAttributeMask uint16
+	FailedAttributeMask      uint16
+}
+
+func (omci *SetResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Unsupported Mask: %#x, Failed Mask: %#x",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.UnsupportedAttributeMask,
+		omci.FailedAttributeMask)
+}
+
+func (omci *SetResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Set
+	if !me.SupportsMsgType(entity, me.Set) {
+		return me.NewProcessingError("managed entity does not support the Set Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+
+	if omci.Result == me.AttributeFailure {
+		omci.UnsupportedAttributeMask = binary.BigEndian.Uint16(data[5:7])
+		omci.FailedAttributeMask = binary.BigEndian.Uint16(data[7:9])
+	}
+	return nil
+}
+
+func decodeSetResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SetResponse{}
+	omci.MsgLayerType = LayerTypeSetResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SetResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Set
+	if !me.SupportsMsgType(entity, me.Set) {
+		return me.NewProcessingError("managed entity does not support the Set Message-Type")
+	}
+	bytes, err := b.AppendBytes(5)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	binary.BigEndian.PutUint16(bytes[1:3], omci.UnsupportedAttributeMask)
+	binary.BigEndian.PutUint16(bytes[3:5], omci.FailedAttributeMask)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// GetRequest
+type GetRequest struct {
+	MeBasePacket
+	AttributeMask uint16
+}
+
+func (omci *GetRequest) String() string {
+	return fmt.Sprintf("%v, Mask: %#x",
+		omci.MeBasePacket.String(), omci.AttributeMask)
+}
+func (omci *GetRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get
+	if !me.SupportsMsgType(meDefinition, me.Get) {
+		return me.NewProcessingError("managed entity does not support Get Message-Type")
+	}
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeGetRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetRequest{}
+	omci.MsgLayerType = LayerTypeGetRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get
+	if !me.SupportsMsgType(meDefinition, me.Get) {
+		return me.NewProcessingError("managed entity does not support Get Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.AttributeMask)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// GetResponse
+type GetResponse struct {
+	MeBasePacket
+	Result                   me.Results
+	AttributeMask            uint16
+	Attributes               me.AttributeValueMap
+	UnsupportedAttributeMask uint16
+	FailedAttributeMask      uint16
+}
+
+func (omci *GetResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Mask: %#x, Unsupported: %#x, Failed: %#x, attributes: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.AttributeMask,
+		omci.UnsupportedAttributeMask, omci.FailedAttributeMask, omci.Attributes)
+}
+
+func (omci *GetResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get
+	if !me.SupportsMsgType(meDefinition, me.Get) {
+		return me.NewProcessingError("managed entity does not support Get Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+	omci.AttributeMask = binary.BigEndian.Uint16(data[5:7])
+
+	// Attribute decode. Note that the ITU-T G.988 specification states that the
+	//                   Unsupported and Failed attribute masks are always present
+	//                   but only valid if the status code == 9.  However some XGS
+	//                   ONUs (T&W and Alpha, perhaps more) will use these last 4
+	//                   octets for data if the status code == 0.  So accommodate
+	//                   this behaviour in favor of greater interoperability.
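+	// Illustration of the baseline content layout handled below: octet 4 carries
+	// the result, octets 5..6 the attribute mask, and octets 7..35 the attribute
+	// values, except that when the result is Attribute Failure the values stop at
+	// octet 31 and octets 32..35 carry the Unsupported/Failed attribute masks.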
+	lastOctet := 36
+	if omci.Result == me.AttributeFailure {
+		lastOctet = 32
+	}
+	omci.Attributes, err = meDefinition.DecodeAttributes(omci.AttributeMask, data[7:lastOctet], p, byte(GetResponseType))
+	if err != nil {
+		return err
+	}
+	// If attributes failed or are unknown, decode the optional attribute masks
+	if omci.Result == me.AttributeFailure {
+		omci.UnsupportedAttributeMask = binary.BigEndian.Uint16(data[32:34])
+		omci.FailedAttributeMask = binary.BigEndian.Uint16(data[34:36])
+	}
+	// Validate all attributes support read
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Read) {
+			msg := fmt.Sprintf("attribute '%v' does not support read access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	if eidDef, eidDefOK := (*meDefinition.GetAttributeDefinitions())[0]; eidDefOK {
+		omci.Attributes[eidDef.GetName()] = omci.EntityInstance
+		return nil
+	}
+	panic("All Managed Entities have an EntityID attribute")
+}
+
+func decodeGetResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetResponse{}
+	omci.MsgLayerType = LayerTypeGetResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get
+	if !me.SupportsMsgType(meDefinition, me.Get) {
+		return me.NewProcessingError("managed entity does not support the Get Message-Type")
+	}
+	bytes, err := b.AppendBytes(3)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	binary.BigEndian.PutUint16(bytes[1:3], omci.AttributeMask)
+
+	// Validate all attributes support read
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Read) {
+			msg := fmt.Sprintf("attribute '%v' does not support read access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	// Attribute serialization
+	switch omci.Result {
+	default:
+		break
+
+	case me.Success, me.AttributeFailure:
+		// TODO: Only Baseline supported at this time
+		bytesAvailable := MaxBaselineLength - 11 - 4 - 8
+
+		err = meDefinition.SerializeAttributes(omci.Attributes, omci.AttributeMask,
+			b, byte(GetResponseType), bytesAvailable)
+		if err != nil {
+			return err
+		}
+		// Calculate space left. Max - msgType header - OMCI trailer - space used so far
+		bytesLeft := MaxBaselineLength - 4 - 8 - len(b.Bytes())
+
+		remainingBytes, err := b.AppendBytes(bytesLeft + 4)
+		if err != nil {
+			return me.NewMessageTruncatedError(err.Error())
+		}
+		copy(remainingBytes, lotsOfZeros[:])
+		if omci.Result == me.AttributeFailure {
+			binary.BigEndian.PutUint16(remainingBytes[bytesLeft-4:bytesLeft-2], omci.UnsupportedAttributeMask)
+			binary.BigEndian.PutUint16(remainingBytes[bytesLeft-2:bytesLeft], omci.FailedAttributeMask)
+		}
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// GetAllAlarms
+type GetAllAlarmsRequest struct {
+	MeBasePacket
+	AlarmRetrievalMode byte
+}
+
+func (omci *GetAllAlarmsRequest) String() string {
+	return fmt.Sprintf("%v, Retrieval Mode: %v",
+		omci.MeBasePacket.String(), omci.AlarmRetrievalMode)
+}
+
+func (omci *GetAllAlarmsRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms
+	if !me.SupportsMsgType(meDefinition, me.GetAllAlarms) {
+		return me.NewProcessingError("managed entity does not support Get All Alarms Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for Get All Alarms request: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for Get All Alarms request: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.AlarmRetrievalMode = data[4]
+	if omci.AlarmRetrievalMode > 1 {
+		msg := fmt.Sprintf("invalid Alarm Retrieval Mode for Get All Alarms request: %v, must be 0..1",
+			omci.AlarmRetrievalMode)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+func decodeGetAllAlarmsRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetAllAlarmsRequest{}
+	omci.MsgLayerType = LayerTypeGetAllAlarmsRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetAllAlarmsRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms
+	if !me.SupportsMsgType(entity, me.GetAllAlarms) {
+		return me.NewProcessingError("managed entity does not support the Get All Alarms Message-Type")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = omci.AlarmRetrievalMode
+	return nil
+}
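+
+// Illustrative construction (a sketch; field placement inside MeBasePacket is
+// assumed): a Get All Alarms request is addressed to ONU Data (class 2, instance 0)
+// with retrieval mode 0 (all alarms) or 1 (alarms not under Alarm Reporting Control):
+//
+//	request := &GetAllAlarmsRequest{
+//		MeBasePacket:       MeBasePacket{EntityClass: me.OnuDataClassId, EntityInstance: 0},
+//		AlarmRetrievalMode: 0,
+//	}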
+
+/////////////////////////////////////////////////////////////////////////////
+// GetAllAlarms
+type GetAllAlarmsResponse struct {
+	MeBasePacket
+	NumberOfCommands uint16
+}
+
+func (omci *GetAllAlarmsResponse) String() string {
+	return fmt.Sprintf("%v, NumberOfCommands: %d",
+		omci.MeBasePacket.String(), omci.NumberOfCommands)
+}
+
+func (omci *GetAllAlarmsResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms
+	if !me.SupportsMsgType(meDefinition, me.GetAllAlarms) {
+		return me.NewProcessingError("managed entity does not support Get All Alarms Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for Get All Alarms response: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for Get All Alarms response: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.NumberOfCommands = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeGetAllAlarmsResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetAllAlarmsResponse{}
+	omci.MsgLayerType = LayerTypeGetAllAlarmsResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetAllAlarmsResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms
+	if !me.SupportsMsgType(entity, me.GetAllAlarms) {
+		return me.NewProcessingError("managed entity does not support the Get All Alarms Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], omci.NumberOfCommands)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// GetAllAlarms
+type GetAllAlarmsNextRequest struct {
+	MeBasePacket
+	CommandSequenceNumber uint16
+}
+
+func (omci *GetAllAlarmsNextRequest) String() string {
+	return fmt.Sprintf("%v, Sequence Number: %d",
+		omci.MeBasePacket.String(), omci.CommandSequenceNumber)
+}
+
+func (omci *GetAllAlarmsNextRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms Next
+	if !me.SupportsMsgType(meDefinition, me.GetAllAlarmsNext) {
+		return me.NewProcessingError("managed entity does not support Get All Alarms Next Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for Get All Alarms Next request: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for Get All Alarms Next request: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.CommandSequenceNumber = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeGetAllAlarmsNextRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetAllAlarmsNextRequest{}
+	omci.MsgLayerType = LayerTypeGetAllAlarmsNextRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetAllAlarmsNextRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms Next
+	if !me.SupportsMsgType(entity, me.GetAllAlarmsNext) {
+		return me.NewProcessingError("managed entity does not support the Get All Alarms Next Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.CommandSequenceNumber)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// GetAllAlarms
+type GetAllAlarmsNextResponse struct {
+	MeBasePacket
+	AlarmEntityClass    me.ClassID
+	AlarmEntityInstance uint16
+	AlarmBitMap         [28]byte // 224 bits
+}
+
+func (omci *GetAllAlarmsNextResponse) String() string {
+	return fmt.Sprintf("%v, CID: %v, EID: (%d/%#x), Bitmap: %v",
+		omci.MeBasePacket.String(), omci.AlarmEntityClass, omci.AlarmEntityInstance,
+		omci.AlarmEntityInstance, omci.AlarmBitMap)
+}
+
+func (omci *GetAllAlarmsNextResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms Next
+	if !me.SupportsMsgType(meDefinition, me.GetAllAlarmsNext) {
+		return me.NewProcessingError("managed entity does not support Get All Alarms Next Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for Get All Alarms Next response: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for Get All Alarms Next response: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.AlarmEntityClass = me.ClassID(binary.BigEndian.Uint16(data[4:6]))
+	omci.AlarmEntityInstance = binary.BigEndian.Uint16(data[6:8])
+
+	copy(omci.AlarmBitMap[:], data[8:36])
+	return nil
+}
+
+func decodeGetAllAlarmsNextResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetAllAlarmsNextResponse{}
+	omci.MsgLayerType = LayerTypeGetAllAlarmsNextResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetAllAlarmsNextResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get All Alarms Next
+	if !me.SupportsMsgType(entity, me.GetAllAlarmsNext) {
+		return me.NewProcessingError("managed entity does not support the Get All Alarms Next Message-Type")
+	}
+	bytes, err := b.AppendBytes(2 + 2 + 28)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:], uint16(omci.AlarmEntityClass))
+	binary.BigEndian.PutUint16(bytes[2:], omci.AlarmEntityInstance)
+	copy(bytes[4:], omci.AlarmBitMap[:])
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// MibUploadRequest
+type MibUploadRequest struct {
+	MeBasePacket
+}
+
+func (omci *MibUploadRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *MibUploadRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload
+	if !me.SupportsMsgType(meDefinition, me.MibUpload) {
+		return me.NewProcessingError("managed entity does not support MIB Upload Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for MIB Upload request: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for MIB Upload request: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	return nil
+}
+
+func decodeMibUploadRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibUploadRequest{}
+	omci.MsgLayerType = LayerTypeMibUploadRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibUploadRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload
+	if !me.SupportsMsgType(meDefinition, me.MibUpload) {
+		return me.NewProcessingError("managed entity does not support the MIB Upload Message-Type")
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// MibUploadResponse
+type MibUploadResponse struct {
+	MeBasePacket
+	NumberOfCommands uint16
+}
+
+func (omci *MibUploadResponse) String() string {
+	return fmt.Sprintf("%v, NumberOfCommands: %#v",
+		omci.MeBasePacket.String(), omci.NumberOfCommands)
+}
+
+func (omci *MibUploadResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload
+	if !me.SupportsMsgType(meDefinition, me.MibUpload) {
+		return me.NewProcessingError("managed entity does not support MIB Upload Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for MIB Upload response: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for MIB Upload response: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.NumberOfCommands = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeMibUploadResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibUploadResponse{}
+	omci.MsgLayerType = LayerTypeMibUploadResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibUploadResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload
+	if !me.SupportsMsgType(entity, me.MibUpload) {
+		return me.NewProcessingError("managed entity does not support the MIB Upload Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], omci.NumberOfCommands)
+	return nil
+}
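+
+// A rough sketch of the MIB upload sequence implied by these messages (illustrative
+// only): send a MibUploadRequest to ONU Data, read NumberOfCommands from the
+// response, then issue one MibUploadNextRequest per command:
+//
+//	for seq := uint16(0); seq < response.NumberOfCommands; seq++ {
+//		next := &MibUploadNextRequest{CommandSequenceNumber: seq}
+//		// transmit next and decode the matching MibUploadNextResponse, whose
+//		// ReportedME carries one uploaded managed entity
+//		_ = next
+//	}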
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type MibUploadNextRequest struct {
+	MeBasePacket
+	CommandSequenceNumber uint16
+}
+
+func (omci *MibUploadNextRequest) String() string {
+	return fmt.Sprintf("%v, Command Sequence Number: %v",
+		omci.MeBasePacket.String(), omci.CommandSequenceNumber)
+}
+
+func (omci *MibUploadNextRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload Next
+	if !me.SupportsMsgType(meDefinition, me.MibUploadNext) {
+		return me.NewProcessingError("managed entity does not support MIB Upload Next Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for MIB Upload Next request: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for MIB Upload Next request: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	omci.CommandSequenceNumber = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeMibUploadNextRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibUploadNextRequest{}
+	omci.MsgLayerType = LayerTypeMibUploadNextRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibUploadNextRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload Next
+	if !me.SupportsMsgType(entity, me.MibUploadNext) {
+		return me.NewProcessingError("managed entity does not support the MIB Upload Next Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], omci.CommandSequenceNumber)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type MibUploadNextResponse struct {
+	MeBasePacket
+	ReportedME me.ManagedEntity
+}
+
+func (omci *MibUploadNextResponse) String() string {
+	return fmt.Sprintf("%v, ReportedME: [%v]",
+		omci.MeBasePacket.String(), omci.ReportedME.String())
+}
+
+func (omci *MibUploadNextResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MibUploadNext
+	if !me.SupportsMsgType(meDefinition, me.MibUploadNext) {
+		return me.NewProcessingError("managed entity does not support MIB Upload Next Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for MIB Upload Next response: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for MIB Upload Next response: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	// Decode reported ME.  If an out-of-range sequence number was sent, this will
+	// contain an ME with class ID and entity ID of zero and you should get an
+	// error of "managed entity definition not found" returned.
+	return omci.ReportedME.DecodeFromBytes(data[4:], p, byte(MibUploadNextResponseType))
+}
+
+func decodeMibUploadNextResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibUploadNextResponse{}
+	omci.MsgLayerType = LayerTypeMibUploadNextResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibUploadNextResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Upload Next
+	if !me.SupportsMsgType(entity, me.MibUploadNext) {
+		return me.NewProcessingError("managed entity does not support the MIB Upload Next Message-Type")
+	}
+	// TODO: Only Baseline supported at this time
+	bytesAvailable := MaxBaselineLength - 8 - 8
+
+	return omci.ReportedME.SerializeTo(b, byte(MibUploadNextResponseType), bytesAvailable)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// MibResetRequest
+type MibResetRequest struct {
+	MeBasePacket
+}
+
+func (omci *MibResetRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *MibResetRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB reset
+	if !me.SupportsMsgType(meDefinition, me.MibReset) {
+		return me.NewProcessingError("managed entity does not support MIB Reset Message-Type")
+	}
+	// Entity Class is always ONU DATA (2) and Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		msg := fmt.Sprintf("invalid Entity Class for MIB Reset request: %v",
+			omci.EntityClass)
+		return me.NewProcessingError(msg)
+	}
+	if omci.EntityInstance != 0 {
+		msg := fmt.Sprintf("invalid Entity Instance for MIB Reset request: %v",
+			omci.EntityInstance)
+		return me.NewUnknownInstanceError(msg)
+	}
+	return nil
+}
+
+func decodeMibResetRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibResetRequest{}
+	omci.MsgLayerType = LayerTypeMibResetRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibResetRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Add class ID and entity ID
+	return omci.MeBasePacket.SerializeTo(b)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// MibResetResponse
+type MibResetResponse struct {
+	MeBasePacket
+	Result me.Results
+}
+
+func (omci *MibResetResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v)",
+		omci.MeBasePacket.String(), omci.Result, omci.Result)
+}
+
+func (omci *MibResetResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB reset
+	if !me.SupportsMsgType(meDefinition, me.MibReset) {
+		return me.NewProcessingError("managed entity does not support MIB Reset Message-Type")
+	}
+	// MIB Reset Response Entity Class is always ONU DATA (2) and
+	// Entity Instance is always 0
+	if omci.EntityClass != me.OnuDataClassId {
+		return me.NewProcessingError("invalid Entity Class for MIB Reset Response")
+	}
+	if omci.EntityInstance != 0 {
+		return me.NewUnknownInstanceError("invalid Entity Instance for MIB Reset Response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+func decodeMibResetResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &MibResetResponse{}
+	omci.MsgLayerType = LayerTypeMibResetResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *MibResetResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support MIB Reset
+	if !me.SupportsMsgType(entity, me.MibReset) {
+		return me.NewProcessingError("managed entity does not support the MIB Reset Message-Type")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// AlarmNotificationMsg
+const AlarmBitmapSize = 224
+
+type AlarmNotificationMsg struct {
+	MeBasePacket
+	AlarmBitmap         [AlarmBitmapSize / 8]byte
+	zeroPadding         [3]byte
+	AlarmSequenceNumber byte
+}
+
+func (omci *AlarmNotificationMsg) String() string {
+	return fmt.Sprintf("%v, Sequence Number: %d, Alarm Bitmap: %v",
+		omci.MeBasePacket.String(), omci.AlarmSequenceNumber, omci.AlarmBitmap)
+}
+
+func (omci *AlarmNotificationMsg) IsAlarmActive(alarmNumber uint8) (bool, error) {
+	if alarmNumber >= AlarmBitmapSize {
+		msg := fmt.Sprintf("invalid alarm number: %v, must be 0..223", alarmNumber)
+		return false, errors.New(msg)
+	}
+	octet := alarmNumber / 8
+	bit := 7 - (alarmNumber % 8)
+	return (omci.AlarmBitmap[octet]>>bit)&1 == 1, nil
+}
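+
+// Bit numbering used above, for illustration: alarm 0 is the most significant bit
+// of octet 0, alarm 7 the least significant bit of octet 0, alarm 8 the most
+// significant bit of octet 1, and so on up to alarm 223.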
+
+func (omci *AlarmNotificationMsg) IsAlarmClear(alarmNumber uint8) (bool, error) {
+	if alarmNumber >= AlarmBitmapSize {
+		msg := fmt.Sprintf("invalid alarm number: %v, must be 0..223", alarmNumber)
+		return false, errors.New(msg)
+	}
+	octet := alarmNumber / 8
+	bit := 7 - (alarmNumber % 8)
+	return (omci.AlarmBitmap[octet]>>bit)&1 == 0, nil
+}
+
+func (omci *AlarmNotificationMsg) ActivateAlarm(alarmNumber uint8) error {
+	if alarmNumber >= AlarmBitmapSize {
+		msg := fmt.Sprintf("invalid alarm number: %v, must be 0..223", alarmNumber)
+		return errors.New(msg)
+	}
+	octet := alarmNumber / 8
+	bit := 7 - (alarmNumber % 8)
+	omci.AlarmBitmap[octet] |= 1 << bit
+	return nil
+}
+
+func (omci *AlarmNotificationMsg) ClearAlarm(alarmNumber uint8) error {
+	if alarmNumber >= AlarmBitmapSize {
+		msg := fmt.Sprintf("invalid alarm number: %v, must be 0..223", alarmNumber)
+		return errors.New(msg)
+	}
+	octet := alarmNumber / 8
+	bit := 7 - (alarmNumber % 8)
+	omci.AlarmBitmap[octet] &= ^(1 << bit)
+	return nil
+}
+
+func (omci *AlarmNotificationMsg) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	//var meDefinition me.IManagedEntityDefinition
+	//meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+	//	me.ParamData{EntityID: omci.EntityInstance})
+	//if err != nil {
+	//	return err
+	//}
+	// ME needs to support Alarms
+	// TODO: Add attribute to ME to specify that alarm is allowed
+	//if !me.SupportsMsgType(meDefinition, me.MibReset) {
+	//	return me.NewProcesssingError("managed entity does not support MIB Reset Message-Type")
+	//}
+	for index, octet := range data[4 : 4+(AlarmBitmapSize/8)] {
+		omci.AlarmBitmap[index] = octet
+	}
+	padOffset := 4 + (AlarmBitmapSize / 8)
+	omci.zeroPadding[0] = data[padOffset]
+	omci.zeroPadding[1] = data[padOffset+1]
+	omci.zeroPadding[2] = data[padOffset+2]
+
+	omci.AlarmSequenceNumber = data[padOffset+3]
+	return nil
+}
+
+func decodeAlarmNotification(data []byte, p gopacket.PacketBuilder) error {
+	omci := &AlarmNotificationMsg{}
+	omci.MsgLayerType = LayerTypeAlarmNotification
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *AlarmNotificationMsg) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	//var meDefinition me.IManagedEntityDefinition
+	//meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+	//	me.ParamData{EntityID: omci.EntityInstance})
+	//if err != nil {
+	//	return err
+	//}
+	// ME needs to support Alarms
+	// TODO: Add attribute to ME to specify that alarm is allowed
+	//if !me.SupportsMsgType(meDefinition, me.MibReset) {
+	//	return me.NewProcessingError("managed entity does not support MIB Reset Message-Type")
+	//}
+	bytes, err := b.AppendBytes((AlarmBitmapSize / 8) + 3 + 1)
+	if err != nil {
+		return err
+	}
+	for index, octet := range omci.AlarmBitmap {
+		bytes[index] = octet
+	}
+	padOffset := AlarmBitmapSize / 8
+	bytes[padOffset] = 0
+	bytes[padOffset+1] = 0
+	bytes[padOffset+2] = 0
+	bytes[padOffset+3] = omci.AlarmSequenceNumber
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// AttributeValueChangeMsg
+type AttributeValueChangeMsg struct {
+	MeBasePacket
+	AttributeMask uint16
+	Attributes    me.AttributeValueMap
+}
+
+func (omci *AttributeValueChangeMsg) String() string {
+	return fmt.Sprintf("%v, Mask: %#x, attributes: %v",
+		omci.MeBasePacket.String(), omci.AttributeMask, omci.Attributes)
+}
+
+func (omci *AttributeValueChangeMsg) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+	// Attribute decode
+	omci.Attributes, err = meDefinition.DecodeAttributes(omci.AttributeMask, data[6:40], p, byte(AttributeValueChangeType))
+	// TODO: Add support for attributes that can have an AVC associated with them and then add a check here
+	// Validate all attributes support AVC
+	//for attrName := range omci.attributes {
+	//	attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	if attr.Index != 0 && !me.SupportsAttributeAVC(attr) {
+	//		msg := fmt.Sprintf("attribute '%v' does not support AVC notifications", attrName)
+	//		return me.NewProcessingError(msg)
+	//	}
+	//}
+	return err
+}
+
+func decodeAttributeValueChange(data []byte, p gopacket.PacketBuilder) error {
+	omci := &AttributeValueChangeMsg{}
+	omci.MsgLayerType = LayerTypeAttributeValueChange
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *AttributeValueChangeMsg) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// TODO: Add support for attributes that can have an AVC associated with them and then add a check here
+	// Validate all attributes support AVC
+	//for attrName := range omci.attributes {
+	//	attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	if attr.Index != 0 && !me.SupportsAttributeAVC(attr) {
+	//		msg := fmt.Sprintf("attribute '%v' does not support AVC notifications", attrName)
+	//		return me.NewProcessingError(msg)
+	//	}
+	//}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.AttributeMask)
+
+	// Attribute serialization
+	// TODO: Only Baseline supported at this time
+	bytesAvailable := MaxBaselineLength - 10 - 8
+
+	return meDefinition.SerializeAttributes(omci.Attributes, omci.AttributeMask, b,
+		byte(AttributeValueChangeType), bytesAvailable)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// TestRequest:		TODO: Not yet implemented
+type TestRequest struct {
+	MeBasePacket
+}
+
+func (omci *TestRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *TestRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // return nil
+}
+
+func decodeTestRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &TestRequest{}
+	omci.MsgLayerType = LayerTypeTestRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *TestRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // omci.cachedME.SerializeTo(mask, b)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// TestResponse:		TODO: Not yet implemented
+type TestResponse struct {
+	MeBasePacket
+}
+
+func (omci *TestResponse) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *TestResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // return nil
+}
+
+func decodeTestResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &TestResponse{}
+	omci.MsgLayerType = LayerTypeTestResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *TestResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets (10 octets for the extended message set)
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // omci.cachedME.SerializeTo(mask, b)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type StartSoftwareDownloadRequest struct {
+	MeBasePacket                // Note: EntityInstance for software download takes one of two specific values
+	WindowSize           byte   // Window Size -1
+	ImageSize            uint32 // Octets
+	NumberOfCircuitPacks byte
+	CircuitPacks         []uint16 // MSB & LSB of software image instance
+}
+
+func (omci *StartSoftwareDownloadRequest) String() string {
+	return fmt.Sprintf("%v, Window Size: %v, Image Size: %v, # Circuit Packs: %v",
+		omci.MeBasePacket.String(), omci.WindowSize, omci.ImageSize, omci.NumberOfCircuitPacks)
+}
+
+func (omci *StartSoftwareDownloadRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Start Software Download
+	if !me.SupportsMsgType(meDefinition, me.StartSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support Start Software Download Message-Type")
+	}
+	// The Entity Class for software download is always the Software Image
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Start Software Download request")
+	}
+	omci.WindowSize = data[4]
+	omci.ImageSize = binary.BigEndian.Uint32(data[5:9])
+	omci.NumberOfCircuitPacks = data[9]
+	if omci.NumberOfCircuitPacks < 1 || omci.NumberOfCircuitPacks > 9 {
+		msg := fmt.Sprintf("invalid number of Circuit Packs: %v, must be 1..9",
+			omci.NumberOfCircuitPacks)
+		return me.NewAttributeFailureError(msg)
+	}
+	omci.CircuitPacks = make([]uint16, omci.NumberOfCircuitPacks)
+	for index := 0; index < int(omci.NumberOfCircuitPacks); index++ {
+		omci.CircuitPacks[index] = binary.BigEndian.Uint16(data[10+(index*2):])
+	}
+	return nil
+}
+
+func decodeStartSoftwareDownloadRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &StartSoftwareDownloadRequest{}
+	omci.MsgLayerType = LayerTypeStartSoftwareDownloadRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *StartSoftwareDownloadRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Start Software Download
+	if !me.SupportsMsgType(entity, me.StartSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support the Start Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Start Software Download request")
+	}
+	if omci.NumberOfCircuitPacks < 1 || omci.NumberOfCircuitPacks > 9 {
+		msg := fmt.Sprintf("invalid number of Circuit Packs: %v, must be 1..9",
+			omci.NumberOfCircuitPacks)
+		return me.NewAttributeFailureError(msg)
+	}
+	bytes, err := b.AppendBytes(6 + (2 * int(omci.NumberOfCircuitPacks)))
+	if err != nil {
+		return err
+	}
+	bytes[0] = omci.WindowSize
+	binary.BigEndian.PutUint32(bytes[1:], omci.ImageSize)
+	bytes[5] = omci.NumberOfCircuitPacks
+	for index := 0; index < int(omci.NumberOfCircuitPacks); index++ {
+		binary.BigEndian.PutUint16(bytes[6+(index*2):], omci.CircuitPacks[index])
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
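+// downloadResults holds the per-instance result reported for each software
+// image instance in the Start and End Software Download response messages.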
+type downloadResults struct {
+	ManagedEntityID uint16 // ME ID of software image entity instance (slot number plus instance 0..1 or 2..254 vendor-specific)
+	Result          me.Results
+}
+
+func (dr *downloadResults) String() string {
+	return fmt.Sprintf("ME: %v (%#x), Results: %d (%v)", dr.ManagedEntityID, dr.ManagedEntityID,
+		dr.Result, dr.Result)
+}
+
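+// StartSoftwareDownloadResponse models the Start Software Download response.
+// It carries the overall result, the window size (minus one) to be used for
+// the download, and optional per-instance results when several software
+// image instances are involved.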
+type StartSoftwareDownloadResponse struct {
+	MeBasePacket      // Note: EntityInstance for software download is two specific values
+	Result            me.Results
+	WindowSize        byte // Window Size -1
+	NumberOfInstances byte
+	MeResults         []downloadResults
+}
+
+func (omci *StartSoftwareDownloadResponse) String() string {
+	return fmt.Sprintf("%v, Results: %v, Window Size: %v, # of Instances: %v, ME Results: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.WindowSize, omci.NumberOfInstances, omci.MeResults)
+}
+
+func (omci *StartSoftwareDownloadResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Start Software Download
+	if !me.SupportsMsgType(meDefinition, me.StartSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support Start Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Start Software Download response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for Start Software Download response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	omci.WindowSize = data[5]
+	omci.NumberOfInstances = data[6]
+
+	if omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 0..9",
+			omci.NumberOfInstances)
+		return errors.New(msg)
+	}
+	if omci.NumberOfInstances > 0 {
+		omci.MeResults = make([]downloadResults, omci.NumberOfInstances)
+
+		for index := 0; index < int(omci.NumberOfInstances); index++ {
+			omci.MeResults[index].ManagedEntityID = binary.BigEndian.Uint16(data[7+(index*3):])
+			omci.MeResults[index].Result = me.Results(data[9+(index*3)])
+			if omci.MeResults[index].Result > me.DeviceBusy {
+				msg := fmt.Sprintf("invalid results for Start Software Download instance %v response: %v, must be 0..6",
+					index, omci.MeResults[index])
+				return errors.New(msg)
+			}
+		}
+	}
+	return nil
+}
+
+func decodeStartSoftwareDownloadResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &StartSoftwareDownloadResponse{}
+	omci.MsgLayerType = LayerTypeStartSoftwareDownloadResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *StartSoftwareDownloadResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Start Software Download
+	if !me.SupportsMsgType(meDefinition, me.StartSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support Start Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Start Software Download response")
+	}
+	bytes, err := b.AppendBytes(3 + (3 * int(omci.NumberOfInstances)))
+	if err != nil {
+		return err
+	}
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for Start Software Download response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	bytes[0] = byte(omci.Result)
+	bytes[1] = omci.WindowSize
+	bytes[2] = omci.NumberOfInstances
+
+	if omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 0..9",
+			omci.NumberOfInstances)
+		return errors.New(msg)
+	}
+	if omci.NumberOfInstances > 0 {
+		for index := 0; index < int(omci.NumberOfInstances); index++ {
+			binary.BigEndian.PutUint16(bytes[3+(3*index):], omci.MeResults[index].ManagedEntityID)
+
+			if omci.MeResults[index].Result > me.DeviceBusy {
+				msg := fmt.Sprintf("invalid results for Start Software Download instance %v response: %v, must be 0..6",
+					index, omci.MeResults[index])
+				return errors.New(msg)
+			}
+			bytes[5+(3*index)] = byte(omci.MeResults[index].Result)
+		}
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
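+// DownloadSectionRequest models the Download Section request. Each baseline
+// section carries a one-octet section number followed by 29 octets of image
+// data; the final section is zero-padded when the image does not fill it.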
+type DownloadSectionRequest struct {
+	MeBasePacket  // Note: EntityInstance for software download is two specific values
+	SectionNumber byte
+	SectionData   [29]byte // 0 padding if final transfer requires only a partial block
+}
+
+func (omci *DownloadSectionRequest) String() string {
+	return fmt.Sprintf("%v, Section #: %v",
+		omci.MeBasePacket.String(), omci.SectionNumber)
+}
+
+func (omci *DownloadSectionRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Download section
+	if !me.SupportsMsgType(meDefinition, me.DownloadSection) {
+		return me.NewProcessingError("managed entity does not support Download Section Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Download Section request")
+	}
+	omci.SectionNumber = data[4]
+	copy(omci.SectionData[0:], data[5:])
+	return nil
+}
+
+func decodeDownloadSectionRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &DownloadSectionRequest{}
+	omci.MsgLayerType = LayerTypeDownloadSectionRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *DownloadSectionRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Download section
+	if !me.SupportsMsgType(meDefinition, me.DownloadSection) {
+		return me.NewProcessingError("managed entity does not support Download Section Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Download Section request")
+	}
+	bytes, err := b.AppendBytes(1 + 29)
+	if err != nil {
+		return err
+	}
+	bytes[0] = omci.SectionNumber
+	copy(bytes[1:], omci.SectionData[0:])
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
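+// DownloadSectionResponse models the Download Section response, carrying the
+// result code and the section number being acknowledged.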
+type DownloadSectionResponse struct {
+	MeBasePacket  // Note: EntityInstance for software download is two specific values
+	Result        me.Results
+	SectionNumber byte
+}
+
+func (omci *DownloadSectionResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Section #: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.SectionNumber)
+}
+
+func (omci *DownloadSectionResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Download section
+	if !me.SupportsMsgType(meDefinition, me.DownloadSection) {
+		return me.NewProcessingError("managed entity does not support Download Section Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Download Section response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for Download Section response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	omci.SectionNumber = data[5]
+	return nil
+}
+
+func decodeDownloadSectionResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &DownloadSectionResponse{}
+	omci.MsgLayerType = LayerTypeDownloadSectionResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *DownloadSectionResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Download section
+	if !me.SupportsMsgType(meDefinition, me.DownloadSection) {
+		return me.NewProcessingError("managed entity does not support Download Section Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Download Section response")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for Download Section response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	// Payload order matches DecodeFromBytes above: result code, then section number
+	bytes[0] = byte(omci.Result)
+	bytes[1] = omci.SectionNumber
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
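+// EndSoftwareDownloadRequest models the End Software Download request. It
+// carries the CRC-32 computed over the downloaded image, the image size in
+// octets, and the software image instances (1..9) the download applies to.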
+type EndSoftwareDownloadRequest struct {
+	MeBasePacket      // Note: EntityInstance for software download is two specific values
+	CRC32             uint32
+	ImageSize         uint32
+	NumberOfInstances byte
+	ImageInstances    []uint16
+}
+
+func (omci *EndSoftwareDownloadRequest) String() string {
+	return fmt.Sprintf("%v, CRC: %#x, Image Size: %v, Number of Instances: %v, Instances: %v",
+		omci.MeBasePacket.String(), omci.CRC32, omci.ImageSize, omci.NumberOfInstances, omci.ImageInstances)
+}
+
+func (omci *EndSoftwareDownloadRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support End Software Download
+	if !me.SupportsMsgType(meDefinition, me.EndSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support End Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for End Software Download request")
+	}
+	omci.CRC32 = binary.BigEndian.Uint32(data[4:8])
+	omci.ImageSize = binary.BigEndian.Uint32(data[8:12])
+	omci.NumberOfInstances = data[12]
+
+	if omci.NumberOfInstances < 1 || omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 1..9",
+			omci.NumberOfInstances)
+		return me.NewAttributeFailureError(msg)
+	}
+	omci.ImageInstances = make([]uint16, omci.NumberOfInstances)
+
+	for index := 0; index < int(omci.NumberOfInstances); index++ {
+		omci.ImageInstances[index] = binary.BigEndian.Uint16(data[13+(index*2):])
+	}
+	return nil
+}
+
+func decodeEndSoftwareDownloadRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &EndSoftwareDownloadRequest{}
+	omci.MsgLayerType = LayerTypeEndSoftwareDownloadRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *EndSoftwareDownloadRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support End Software Download
+	if !me.SupportsMsgType(meDefinition, me.EndSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support End Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for End Software Download request")
+	}
+	if omci.NumberOfInstances < 1 || omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 1..9",
+			omci.NumberOfInstances)
+		return me.NewAttributeFailureError(msg)
+	}
+	bytes, err := b.AppendBytes(9 + (2 * int(omci.NumberOfInstances)))
+	if err != nil {
+		return err
+	}
+	// Offsets are relative to the appended payload; the common header was
+	// already serialized by MeBasePacket.SerializeTo above
+	binary.BigEndian.PutUint32(bytes[0:4], omci.CRC32)
+	binary.BigEndian.PutUint32(bytes[4:8], omci.ImageSize)
+	bytes[8] = omci.NumberOfInstances
+	for index := 0; index < int(omci.NumberOfInstances); index++ {
+		binary.BigEndian.PutUint16(bytes[9+(index*2):], omci.ImageInstances[index])
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
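+// EndSoftwareDownloadResponse models the End Software Download response with
+// an overall result, the number of image instances reported, and optional
+// per-instance results.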
+type EndSoftwareDownloadResponse struct {
+	MeBasePacket      // Note: EntityInstance for software download is two specific values
+	Result            me.Results
+	NumberOfInstances byte
+	MeResults         []downloadResults
+}
+
+func (omci *EndSoftwareDownloadResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Number of Instances: %v, ME Results: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.NumberOfInstances, omci.MeResults)
+}
+
+func (omci *EndSoftwareDownloadResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support End Software Download
+	if !me.SupportsMsgType(meDefinition, me.EndSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support End Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for End Software Download response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for End Software Download response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	omci.NumberOfInstances = data[5]
+
+	if omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 0..9",
+			omci.NumberOfInstances)
+		return errors.New(msg)
+	}
+	if omci.NumberOfInstances > 0 {
+		omci.MeResults = make([]downloadResults, omci.NumberOfInstances)
+
+		for index := 0; index < int(omci.NumberOfInstances); index++ {
+			omci.MeResults[index].ManagedEntityID = binary.BigEndian.Uint16(data[6+(index*3):])
+			omci.MeResults[index].Result = me.Results(data[8+(index*3)])
+			if omci.MeResults[index].Result > me.DeviceBusy {
+				msg := fmt.Sprintf("invalid results for End Software Download instance %v response: %v, must be 0..6",
+					index, omci.MeResults[index])
+				return errors.New(msg)
+			}
+		}
+	}
+	return nil
+}
+
+func decodeEndSoftwareDownloadResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &EndSoftwareDownloadResponse{}
+	omci.MsgLayerType = LayerTypeEndSoftwareDownloadResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *EndSoftwareDownloadResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support End Software Download
+	if !me.SupportsMsgType(meDefinition, me.EndSoftwareDownload) {
+		return me.NewProcessingError("managed entity does not support End Software Download Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for End Software Download response")
+	}
+	bytes, err := b.AppendBytes(3 + (3 * int(omci.NumberOfInstances)))
+	if err != nil {
+		return err
+	}
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results for End Software Download response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	bytes[0] = byte(omci.Result)
+	bytes[1] = omci.NumberOfInstances
+
+	if omci.NumberOfInstances > 9 {
+		msg := fmt.Sprintf("invalid number of Instances: %v, must be 0..9",
+			omci.NumberOfInstances)
+		return errors.New(msg)
+	}
+	if omci.NumberOfInstances > 0 {
+		for index := 0; index < int(omci.NumberOfInstances); index++ {
+			binary.BigEndian.PutUint16(bytes[2+(3*index):], omci.MeResults[index].ManagedEntityID)
+
+			if omci.MeResults[index].Result > me.DeviceBusy {
+				msg := fmt.Sprintf("invalid results for End Software Download instance %v response: %v, must be 0..6",
+					index, omci.MeResults[index])
+				return errors.New(msg)
+			}
+			bytes[4+(3*index)] = byte(omci.MeResults[index].Result)
+		}
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
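+// ActivateSoftwareRequest models the Activate Software request. The single
+// payload octet holds the activation flags, restricted to the values 0..2 by
+// the validation below.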
+type ActivateSoftwareRequest struct {
+	MeBasePacket  // Note: EntityInstance for software download is two specific values
+	ActivateFlags byte
+}
+
+func (omci *ActivateSoftwareRequest) String() string {
+	return fmt.Sprintf("%v, Flags: %#x",
+		omci.MeBasePacket.String(), omci.ActivateFlags)
+}
+
+func (omci *ActivateSoftwareRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Activate Software
+	if !me.SupportsMsgType(meDefinition, me.ActivateSoftware) {
+		return me.NewProcessingError("managed entity does not support Activate Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Activate Software request")
+	}
+	omci.ActivateFlags = data[4]
+	if omci.ActivateFlags > 2 {
+		msg := fmt.Sprintf("invalid Activation flags: %v, must be 0..2",
+			omci.ActivateFlags)
+		return me.NewAttributeFailureError(msg)
+	}
+	return nil
+}
+
+func decodeActivateSoftwareRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &ActivateSoftwareRequest{}
+	omci.MsgLayerType = LayerTypeActivateSoftwareRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *ActivateSoftwareRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Activate Software
+	if !me.SupportsMsgType(meDefinition, me.ActivateSoftware) {
+		return me.NewProcessingError("managed entity does not support Activate Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Activate Software request")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = omci.ActivateFlags
+	if omci.ActivateFlags > 2 {
+		msg := fmt.Sprintf("invalid results for Activate Software request: %v, must be 0..2",
+			omci.ActivateFlags)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type ActivateSoftwareResponse struct {
+	MeBasePacket
+	Result me.Results
+}
+
+func (omci *ActivateSoftwareResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v)",
+		omci.MeBasePacket.String(), omci.Result, omci.Result)
+}
+
+func (omci *ActivateSoftwareResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Activate Software
+	if !me.SupportsMsgType(meDefinition, me.ActivateSoftware) {
+		return me.NewProcessingError("managed entity does not support Activate Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Activate Software response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.Results(6) {
+		msg := fmt.Sprintf("invalid results for Activate Software response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+func decodeActivateSoftwareResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &ActivateSoftwareResponse{}
+	omci.MsgLayerType = LayerTypeActivateSoftwareResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *ActivateSoftwareResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Activate Software
+	if !me.SupportsMsgType(meDefinition, me.ActivateSoftware) {
+		return me.NewProcessingError("managed entity does not support Activate Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Activate Software response")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	if omci.Result > me.Results(6) {
+		msg := fmt.Sprintf("invalid results for Activate Software response: %v, must be 0..6",
+			omci.Result)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type CommitSoftwareRequest struct {
+	MeBasePacket
+}
+
+func (omci *CommitSoftwareRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *CommitSoftwareRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Commit Software
+	if !me.SupportsMsgType(meDefinition, me.CommitSoftware) {
+		return me.NewProcessingError("managed entity does not support Commit Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Commit Software request")
+	}
+	return nil
+}
+
+func decodeCommitSoftwareRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &CommitSoftwareRequest{}
+	omci.MsgLayerType = LayerTypeCommitSoftwareRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *CommitSoftwareRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Commit Software
+	if !me.SupportsMsgType(meDefinition, me.CommitSoftware) {
+		return me.NewProcessingError("managed entity does not support Commit Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Commit Software request")
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type CommitSoftwareResponse struct {
+	MeBasePacket
+}
+
+func (omci *CommitSoftwareResponse) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *CommitSoftwareResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Commit Software
+	if !me.SupportsMsgType(meDefinition, me.CommitSoftware) {
+		return me.NewProcessingError("managed entity does not support Commit Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Commit Software response")
+	}
+	return nil
+}
+
+func decodeCommitSoftwareResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &CommitSoftwareResponse{}
+	omci.MsgLayerType = LayerTypeCommitSoftwareResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *CommitSoftwareResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Commit Software
+	if !me.SupportsMsgType(meDefinition, me.CommitSoftware) {
+		return me.NewProcessingError("managed entity does not support Commit Software Message-Type")
+	}
+	// The Software Image Entity Class is always used for this message type
+	if omci.EntityClass != me.SoftwareImageClassId {
+		return me.NewProcessingError("invalid Entity Class for Commit Software response")
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
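+// SynchronizeTimeRequest models the Synchronize Time request, which carries
+// the current date and time (year, month, day, hour, minute, second) and is
+// always addressed to the ONU-G Managed Entity, instance 0.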
+type SynchronizeTimeRequest struct {
+	MeBasePacket
+	Year   uint16
+	Month  uint8
+	Day    uint8
+	Hour   uint8
+	Minute uint8
+	Second uint8
+}
+
+func (omci *SynchronizeTimeRequest) String() string {
+	return fmt.Sprintf("%v, Date-Time: %d/%d/%d-%02d:%02d:%02d",
+		omci.MeBasePacket.String(), omci.Year, omci.Month, omci.Day, omci.Hour, omci.Minute, omci.Second)
+}
+
+func (omci *SynchronizeTimeRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Synchronize Time
+	if !me.SupportsMsgType(meDefinition, me.SynchronizeTime) {
+		return me.NewProcessingError("managed entity does not support Synchronize Time Message-Type")
+	}
+	// Synchronize Time Entity Class is always ONU-G (256) and Entity Instance of 0
+	if omci.EntityClass != me.OnuGClassId {
+		return me.NewProcessingError("invalid Entity Class for Synchronize Time request")
+	}
+	if omci.EntityInstance != 0 {
+		return me.NewUnknownInstanceError("invalid Entity Instance for Synchronize Time request")
+	}
+	omci.Year = binary.BigEndian.Uint16(data[4:6])
+	omci.Month = data[6]
+	omci.Day = data[7]
+	omci.Hour = data[8]
+	omci.Minute = data[9]
+	omci.Second = data[10]
+	return nil
+}
+
+func decodeSynchronizeTimeRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SynchronizeTimeRequest{}
+	omci.MsgLayerType = LayerTypeSynchronizeTimeRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SynchronizeTimeRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Synchronize Time
+	if !me.SupportsMsgType(entity, me.SynchronizeTime) {
+		return me.NewProcessingError("managed entity does not support the Synchronize Time Message-Type")
+	}
+	bytes, err := b.AppendBytes(7)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], omci.Year)
+	bytes[2] = omci.Month
+	bytes[3] = omci.Day
+	bytes[4] = omci.Hour
+	bytes[5] = omci.Minute
+	bytes[6] = omci.Second
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type SynchronizeTimeResponse struct {
+	MeBasePacket
+	Result         me.Results
+	SuccessResults uint8 // Only if 'Result' is 0 -> success
+}
+
+func (omci *SynchronizeTimeResponse) String() string {
+	return fmt.Sprintf("%v, Results: %d (%v), Success: %d",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.SuccessResults)
+}
+
+func (omci *SynchronizeTimeResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Synchronize Time
+	if !me.SupportsMsgType(meDefinition, me.SynchronizeTime) {
+		return me.NewProcessingError("managed entity does not support Synchronize Time Message-Type")
+	}
+	// Synchronize Time Entity Class is always ONU-G (256) and Entity Instance of 0
+	if omci.EntityClass != me.OnuGClassId {
+		return me.NewProcessingError("invalid Entity Class for Synchronize Time response")
+	}
+	if omci.EntityInstance != 0 {
+		return me.NewUnknownInstanceError("invalid Entity Instance for Synchronize Time response")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > me.DeviceBusy {
+		msg := fmt.Sprintf("invalid results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	omci.SuccessResults = data[5]
+	return nil
+}
+
+func decodeSynchronizeTimeResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SynchronizeTimeResponse{}
+	omci.MsgLayerType = LayerTypeSynchronizeTimeResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SynchronizeTimeResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// Synchronize Time Entity Class is always ONU-G (256) and Entity Instance of 0
+	if omci.EntityClass != me.OnuGClassId {
+		return me.NewProcessingError("invalid Entity Class for Synchronize Time response")
+	}
+	if omci.EntityInstance != 0 {
+		return me.NewUnknownInstanceError("invalid Entity Instance for Synchronize Time response")
+	}
+	// ME needs to support Synchronize Time
+	if !me.SupportsMsgType(entity, me.SynchronizeTime) {
+		return me.NewProcessingError("managed entity does not support the Synchronize Time Message-Type")
+	}
+	numBytes := 2
+	if omci.Result != me.Success {
+		numBytes = 1
+	}
+	bytes, err := b.AppendBytes(numBytes)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(omci.Result)
+	if omci.Result == me.Success {
+		bytes[1] = omci.SuccessResults
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
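+// RebootRequest models the Reboot request. The single payload octet is the
+// reboot condition code, restricted to the values 0..3 by the validation
+// below.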
+type RebootRequest struct {
+	MeBasePacket
+	RebootCondition byte
+}
+
+func (omci *RebootRequest) String() string {
+	return fmt.Sprintf("%v, Reboot Condition: %v",
+		omci.MeBasePacket.String(), omci.RebootCondition)
+}
+
+func (omci *RebootRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Reboot
+	if !me.SupportsMsgType(meDefinition, me.Reboot) {
+		return me.NewProcessingError("managed entity does not support Reboot Message-Type")
+	}
+	omci.RebootCondition = data[4]
+	if omci.RebootCondition > 3 {
+		msg := fmt.Sprintf("invalid reboot condition code: %v, must be 0..3", omci.RebootCondition)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+func decodeRebootRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &RebootRequest{}
+	omci.MsgLayerType = LayerTypeRebootRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *RebootRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Reboot
+	if !me.SupportsMsgType(entity, me.Reboot) {
+		return me.NewProcessingError("managed entity does not support the Reboot Message-Type")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	if omci.RebootCondition > 3 {
+		msg := fmt.Sprintf("invalid reboot condition code: %v, must be 0..3", omci.RebootCondition)
+		return me.NewAttributeFailureError(msg)
+	}
+	bytes[0] = omci.RebootCondition
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type RebootResponse struct {
+	MeBasePacket
+	Result me.Results
+}
+
+func (omci *RebootResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v)",
+		omci.MeBasePacket.String(), omci.Result, omci.Result)
+}
+
+func (omci *RebootResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Reboot
+	if !me.SupportsMsgType(meDefinition, me.Reboot) {
+		return me.NewProcessingError("managed entity does not support Reboot Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > 6 {
+		msg := fmt.Sprintf("invalid reboot results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	return nil
+}
+
+func decodeRebootResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &RebootResponse{}
+	omci.MsgLayerType = LayerTypeRebootResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *RebootResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var entity me.IManagedEntityDefinition
+	entity, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Reboot
+	if !me.SupportsMsgType(entity, me.Reboot) {
+		return me.NewProcessingError("managed entity does not support the Reboot Message-Type")
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	if omci.Result > 6 {
+		msg := fmt.Sprintf("invalid reboot results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	bytes[0] = byte(omci.Result)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
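+// GetNextRequest models the Get Next request used to retrieve the remaining
+// portions of a table attribute. The payload carries the attribute mask
+// (which should select a single table attribute) and the sequence number of
+// the portion being requested.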
+type GetNextRequest struct {
+	MeBasePacket
+	AttributeMask  uint16
+	SequenceNumber uint16
+}
+
+func (omci *GetNextRequest) String() string {
+	return fmt.Sprintf("%v, Attribute Mask: %#x, Sequence Number: %v",
+		omci.MeBasePacket.String(), omci.AttributeMask, omci.SequenceNumber)
+}
+
+func (omci *GetNextRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support GetNext
+	if !me.SupportsMsgType(meDefinition, me.GetNext) {
+		return me.NewProcessingError("managed entity does not support Get Next Message-Type")
+	}
+	// Note: G.988 specifies that an error code of (3) should result if more
+	//       than one attribute is requested
+	// TODO: Return error.  Have flag to optionally allow it to be encoded
+	// TODO: Check that the attribute is a table attribute.  Issue warning or return error
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+	omci.SequenceNumber = binary.BigEndian.Uint16(data[6:8])
+	return nil
+}
+
+func decodeGetNextRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetNextRequest{}
+	omci.MsgLayerType = LayerTypeGetNextRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetNextRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support GetNext
+	if !me.SupportsMsgType(meDefinition, me.GetNext) {
+		return me.NewProcessingError("managed entity does not support Get Next Message-Type")
+	}
+	bytes, err := b.AppendBytes(4)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.AttributeMask)
+	binary.BigEndian.PutUint16(bytes[2:], omci.SequenceNumber)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
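+// GetNextResponse models the Get Next response, carrying the result code,
+// the echoed attribute mask, and the decoded attribute values.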
+type GetNextResponse struct {
+	MeBasePacket
+	Result        me.Results
+	AttributeMask uint16
+	Attributes    me.AttributeValueMap
+}
+
+func (omci *GetNextResponse) String() string {
+	return fmt.Sprintf("%v, Result: %v, Attribute Mask: %#x, Attributes: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.AttributeMask, omci.Attributes)
+}
+
+func (omci *GetNextResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Next
+	if !me.SupportsMsgType(meDefinition, me.GetNext) {
+		return me.NewProcessingError("managed entity does not support Get Next Message-Type")
+	}
+	omci.Result = me.Results(data[4])
+	if omci.Result > 6 {
+		msg := fmt.Sprintf("invalid get next results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	omci.AttributeMask = binary.BigEndian.Uint16(data[5:7])
+
+	// Attribute decode
+	omci.Attributes, err = meDefinition.DecodeAttributes(omci.AttributeMask, data[7:], p, byte(GetNextResponseType))
+	if err != nil {
+		return err
+	}
+	// Validate all attributes support read
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Read) {
+			msg := fmt.Sprintf("attribute '%v' does not support read access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	if eidDef, eidDefOK := (*meDefinition.GetAttributeDefinitions())[0]; eidDefOK {
+		omci.Attributes[eidDef.GetName()] = omci.EntityInstance
+		return nil
+	}
+	panic("All Managed Entities have an EntityID attribute")
+}
+
+func decodeGetNextResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetNextResponse{}
+	omci.MsgLayerType = LayerTypeGetNextResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetNextResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Next
+	if !me.SupportsMsgType(meDefinition, me.GetNext) {
+		return me.NewProcessingError("managed entity does not support the Get Next Message-Type")
+	}
+	bytes, err := b.AppendBytes(3)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	if omci.Result > 6 {
+		msg := fmt.Sprintf("invalid get next results code: %v, must be 0..6", omci.Result)
+		return errors.New(msg)
+	}
+	binary.BigEndian.PutUint16(bytes[1:3], omci.AttributeMask)
+
+	// Validate all attributes support read
+	for attrName := range omci.Attributes {
+		attr, err := me.GetAttributeDefinitionByName(meDefinition.GetAttributeDefinitions(), attrName)
+		if err != nil {
+			return err
+		}
+		if attr.Index != 0 && !me.SupportsAttributeAccess(attr, me.Read) {
+			msg := fmt.Sprintf("attribute '%v' does not support read access", attrName)
+			return me.NewProcessingError(msg)
+		}
+	}
+	// Attribute serialization
+	switch omci.Result {
+	default:
+		break
+
+	case me.Success:
+		// TODO: Only Baseline supported at this time
+		bytesAvailable := MaxBaselineLength - 11 - 8
+
+		err = meDefinition.SerializeAttributes(omci.Attributes, omci.AttributeMask, b,
+			byte(GetNextResponseType), bytesAvailable)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type TestResultMsg struct {
+	MeBasePacket
+}
+
+func (omci *TestResultMsg) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *TestResultMsg) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // return nil
+}
+
+func decodeTestResult(data []byte, p gopacket.PacketBuilder) error {
+	omci := &TestResultMsg{}
+	omci.MsgLayerType = LayerTypeTestResult
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *TestResultMsg) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me // omci.cachedME.SerializeTo(mask, b)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type GetCurrentDataRequest struct {
+	MeBasePacket
+	AttributeMask uint16
+}
+
+func (omci *GetCurrentDataRequest) String() string {
+	return fmt.Sprintf("%v, Attribute Mask: %#x",
+		omci.MeBasePacket.String(), omci.AttributeMask)
+}
+
+func (omci *GetCurrentDataRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Current Data
+	if !me.SupportsMsgType(meDefinition, me.GetCurrentData) {
+		return me.NewProcessingError("managed entity does not support Get Current Data Message-Type")
+	}
+	// Note: G.988 specifies that an error code of (3) should result if more
+	//       than one attribute is requested
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+	return nil
+}
+
+func decodeGetCurrentDataRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetCurrentDataRequest{}
+	omci.MsgLayerType = LayerTypeGetCurrentDataRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetCurrentDataRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Current Data
+	if !me.SupportsMsgType(meDefinition, me.GetCurrentData) {
+		return me.NewProcessingError("managed entity does not support Get Current Data Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, omci.AttributeMask)
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type GetCurrentDataResponse struct {
+	MeBasePacket
+	Result        me.Results
+	AttributeMask uint16
+	Attributes    me.AttributeValueMap
+}
+
+func (omci *GetCurrentDataResponse) String() string {
+	return fmt.Sprintf("%v, Result: %d (%v), Attribute Mask: %#x, Attributes: %v",
+		omci.MeBasePacket.String(), omci.Result, omci.Result, omci.AttributeMask, omci.Attributes)
+}
+
+func (omci *GetCurrentDataResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Current Data
+	if !me.SupportsMsgType(meDefinition, me.GetCurrentData) {
+		return me.NewProcessingError("managed entity does not support Get Current Data Message-Type")
+	}
+	omci.AttributeMask = binary.BigEndian.Uint16(data[4:6])
+
+	// Attribute decode
+	omci.Attributes, err = meDefinition.DecodeAttributes(omci.AttributeMask, data[6:], p, byte(GetCurrentDataResponseType))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func decodeGetCurrentDataResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &GetCurrentDataResponse{}
+	omci.MsgLayerType = LayerTypeGetCurrentDataResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *GetCurrentDataResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	var meDefinition me.IManagedEntityDefinition
+	meDefinition, err = me.LoadManagedEntityDefinition(omci.EntityClass,
+		me.ParamData{EntityID: omci.EntityInstance})
+	if err != nil {
+		return err
+	}
+	// ME needs to support Get Current Data
+	if !me.SupportsMsgType(meDefinition, me.GetCurrentData) {
+		return me.NewProcessingError("managed entity does not support the Get Current Data Message-Type")
+	}
+	bytes, err := b.AppendBytes(2)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], omci.AttributeMask)
+
+	// Attribute serialization
+	// TODO: Only Baseline supported at this time
+	bytesAvailable := MaxBaselineLength - 9 - 8
+
+	err = meDefinition.SerializeAttributes(omci.Attributes, omci.AttributeMask, b,
+		byte(GetCurrentDataResponseType), bytesAvailable)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type SetTableRequest struct {
+	MeBasePacket
+	// TODO: Fix me when extended messages are supported
+}
+
+func (omci *SetTableRequest) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *SetTableRequest) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me when extended messages are supported
+}
+
+func decodeSetTableRequest(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SetTableRequest{}
+	omci.MsgLayerType = LayerTypeSetTableRequest
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SetTableRequest) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me when extended messages are supported
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type SetTableResponse struct {
+	MeBasePacket
+	// TODO: Fix me when extended messages are supported
+}
+
+func (omci *SetTableResponse) String() string {
+	return fmt.Sprintf("%v", omci.MeBasePacket.String())
+}
+
+func (omci *SetTableResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	// Common ClassID/EntityID decode in msgBase
+	err := omci.MeBasePacket.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me when extended messages are supported
+}
+
+func decodeSetTableResponse(data []byte, p gopacket.PacketBuilder) error {
+	omci := &SetTableResponse{}
+	omci.MsgLayerType = LayerTypeSetTableResponse
+	return decodingLayerDecoder(omci, data, p)
+}
+
+func (omci *SetTableResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	return errors.New("need to implement") // TODO: Fix me when extended messages are supported
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//
+type UnsupportedMessageTypeResponse struct {
+	MeBasePacket
+	Result me.Results
+}
+
+func (omci *UnsupportedMessageTypeResponse) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	return errors.New("you should never really decode this")
+}
+
+func (omci *UnsupportedMessageTypeResponse) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Basic (common) OMCI Header is 8 octets, 10
+	err := omci.MeBasePacket.SerializeTo(b)
+	if err != nil {
+		return err
+	}
+	bytes, err := b.AppendBytes(1)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(omci.Result)
+	return nil
+}
diff --git a/vendor/github.com/cboling/omci/omci.go b/vendor/github.com/cboling/omci/omci.go
new file mode 100644
index 0000000..f4741b3
--- /dev/null
+++ b/vendor/github.com/cboling/omci/omci.go
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2018 - present.  Boling Consulting Solutions (bcsw.net)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package omci
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/aead/cmac/aes"
+	me "github.com/cboling/omci/generated"
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+)
+
+type DeviceIdent byte
+
+var (
+	LayerTypeOMCI gopacket.LayerType
+)
+
+func init() {
+	LayerTypeOMCI = gopacket.RegisterLayerType(1000,
+		gopacket.LayerTypeMetadata{
+			Name:    "OMCI",
+			Decoder: gopacket.DecodeFunc(decodeOMCI),
+		})
+}
+
+const (
+	// Device Identifiers
+	_                         = iota
+	BaselineIdent DeviceIdent = 0x0A // All G-PON OLTs and ONUs support the baseline message set
+	ExtendedIdent DeviceIdent = 0x0B
+)
+
+var OmciIK = []byte{0x18, 0x4b, 0x8a, 0xd4, 0xd1, 0xac, 0x4a, 0xf4,
+	0xdd, 0x4b, 0x33, 0x9e, 0xcc, 0x0d, 0x33, 0x70}
+
+func (di DeviceIdent) String() string {
+	switch di {
+	default:
+		return "Unknown"
+
+	case BaselineIdent:
+		return "Baseline"
+
+	case ExtendedIdent:
+		return "Extended"
+	}
+}
+
+// MaxBaselineLength is the maximum number of octets allowed in an OMCI Baseline
+// message. Depending on the adapter, it may or may not include the trailing
+// Length and MIC fields.
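+// (A full 48-octet baseline frame breaks down as an 8-octet header, 32 octets
+// of message contents, a 4-octet length field carrying the fixed value 40, and
+// a 4-octet MIC, which is how the constants below are derived.)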
+const MaxBaselineLength = 48
+
+// MaxExtendedLength is the maximum number of octets allowed in an OMCI Extended
+// message (including header).
+const MaxExtendedLength = 1980
+
+// MaxAttributeMibUploadNextBaselineLength is the maximum payload size for attributes for
+// a Baseline MIB Upload Next message.
+const MaxAttributeMibUploadNextBaselineLength = MaxBaselineLength - 14 - 8
+
+// MaxAttributeGetNextBaselineLength is the maximum payload size for attributes for
+// a Baseline MIB Get Next message. This is just the attribute portion of the
+// message contents and does not include the Result Code & Attribute Mask.
+const MaxAttributeGetNextBaselineLength = MaxBaselineLength - 11 - 8
+
+// MaxManagedEntityMibUploadNextExtendedLength is the maximum payload size for ME
+// entries for an Extended MIB Upload Next message. Extended messages differ from
+// the baseline as multiple MEs can be reported in a single frame, just not multiple
+// attributes.
+const MaxManagedEntityMibUploadNextExtendedLength = MaxExtendedLength - 10 - 4
+
+// MaxAttributeGetNextExtendedLength is the maximum payload size for attributes for
+// an Extended MIB Get Next message. This is just the attribute portion of the
+// message contents and does not include the Result Code & Attribute Mask.
+const MaxAttributeGetNextExtendedLength = MaxExtendedLength - 13 - 4
+
+// NullEntityID is often used as the Null/void Managed Entity ID for attributes
+// that are used to refer to other Managed Entities but are currently not provisioned.
+const NullEntityID = uint16(0xffff)
+
+// OMCI defines the common protocol layer. Support for the Extended message set
+// will be added once the Baseline set is working and layered properly. See
+// ITU-T G.988 11/2017 section A.3 for more information.
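+//
+// A typical decode of a received frame might look like the following sketch
+// (frameBytes is a hypothetical []byte holding the raw OMCI message):
+//
+//	packet := gopacket.NewPacket(frameBytes, LayerTypeOMCI, gopacket.NoCopy)
+//	if layer := packet.Layer(LayerTypeOMCI); layer != nil {
+//		omci := layer.(*OMCI)
+//		fmt.Println(omci)
+//	}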
+type OMCI struct {
+	layers.BaseLayer
+	TransactionID    uint16
+	MessageType      MessageType
+	DeviceIdentifier DeviceIdent
+	Payload          []byte
+	padding          []byte
+	Length           uint16
+	MIC              uint32
+}
+
+func (omci *OMCI) String() string {
+	//msgType := me.MsgType(byte(omci.MessageType) & me.MsgTypeMask)
+	//if me.IsAutonomousNotification(msgType) {
+	//	return fmt.Sprintf("OMCI: Type: %v:", msgType)
+	//} else if byte(omci.MessageType)&me.AK == me.AK {
+	//	return fmt.Sprintf("OMCI: Type: %v Response", msgType)
+	//}
+	return fmt.Sprintf("Type: %v, TID: %d (%#x), Ident: %v",
+		omci.MessageType, omci.TransactionID, omci.TransactionID, omci.DeviceIdentifier)
+}
+
+// LayerType returns LayerTypeOMCI
+func (omci *OMCI) LayerType() gopacket.LayerType {
+	return LayerTypeOMCI
+}
+
+func (omci *OMCI) LayerContents() []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint16(b, omci.TransactionID)
+	b[2] = byte(omci.MessageType)
+	b[3] = byte(omci.DeviceIdentifier)
+	return b
+}
+
+func (omci *OMCI) CanDecode() gopacket.LayerClass {
+	return LayerTypeOMCI
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (omci *OMCI) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+func decodeOMCI(data []byte, p gopacket.PacketBuilder) error {
+	// Allow baseline messages without Length & MIC, but no less
+	if len(data) < MaxBaselineLength-8 {
+		return errors.New("frame header too small")
+	}
+	switch DeviceIdent(data[3]) {
+	default:
+		return errors.New("unsupported device identifier")
+
+	case BaselineIdent:
+		//omci := &BaselineMessage{}
+		omci := &OMCI{}
+		return omci.DecodeFromBytes(data, p)
+
+	case ExtendedIdent:
+		//omci := &ExtendedMessage{}
+		omci := &OMCI{}
+		return omci.DecodeFromBytes(data, p)
+	}
+}
+
+func calculateMicAes128(data []byte) (uint32, error) {
+	// See if upstream or downstream
+	var downstreamCDir = [...]byte{0x01}
+	var upstreamCDir = [...]byte{0x02}
+
+	tid := binary.BigEndian.Uint16(data[0:2])
+	var sum []byte
+	var err error
+
+	if (data[2]&me.AK) == me.AK || tid == 0 {
+		sum, err = aes.Sum(append(upstreamCDir[:], data[:44]...), OmciIK, 4)
+	} else {
+		sum, err = aes.Sum(append(downstreamCDir[:], data[:44]...), OmciIK, 4)
+	}
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint32(sum), nil
+}
+
+/////////////////////////////////////////////////////////////////////////////
+//   Baseline Message encode / decode
+//
+
+func (omci *OMCI) DecodeFromBytes(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 10 {
+		p.SetTruncated()
+		return errors.New("frame too small")
+	}
+	omci.TransactionID = binary.BigEndian.Uint16(data[0:])
+	omci.MessageType = MessageType(data[2])
+	omci.DeviceIdentifier = DeviceIdent(data[3])
+
+	isNotification := (int(omci.MessageType) & ^me.MsgTypeMask) == 0
+	if omci.TransactionID == 0 && !isNotification {
+		return errors.New("omci Transaction ID is zero for non-Notification type message")
+	}
+	// Decode length
+	var payloadOffset int
+	var micOffset int
+	if omci.DeviceIdentifier == BaselineIdent {
+		omci.Length = MaxBaselineLength - 8
+		payloadOffset = 8
+		micOffset = MaxBaselineLength - 4
+
+		if len(data) >= micOffset {
+			length := binary.BigEndian.Uint32(data[micOffset-4:])
+			if uint16(length) != omci.Length {
+				return me.NewProcessingError("invalid baseline message length")
+			}
+		}
+	} else {
+		payloadOffset = 10
+		omci.Length = binary.BigEndian.Uint16(data[8:10])
+		micOffset = int(omci.Length) + payloadOffset
+
+		if omci.Length > MaxExtendedLength {
+			return me.NewProcessingError("extended frame exceeds maximum allowed")
+		}
+		if len(data) < micOffset {
+			p.SetTruncated()
+			return me.NewProcessingError("extended frame too small")
+		}
+	}
+	// Extract MIC if present in the data
+	if len(data) >= micOffset+4 {
+		omci.MIC = binary.BigEndian.Uint32(data[micOffset:])
+		actual, _ := calculateMicAes128(data[:micOffset])
+		if omci.MIC != actual {
+			_ = fmt.Sprintf("invalid MIC, expected %#x, got %#x",
+				omci.MIC, actual)
+			//return errors.New(msg)
+		}
+	}
+	omci.BaseLayer = layers.BaseLayer{data[:4], data[4:]}
+	p.AddLayer(omci)
+	nextLayer, err := MsgTypeToNextLayer(omci.MessageType)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(nextLayer)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (omci *OMCI) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// TODO: Hardcoded for baseline message format for now. Will eventually need to support
+	//       the extended message format.
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	// OMCI layer error checks
+	isNotification := (int(omci.MessageType) & ^me.MsgTypeMask) == 0
+	if omci.TransactionID == 0 && !isNotification {
+		return errors.New("omci Transaction ID is zero for non-Notification type message")
+	}
+	if omci.DeviceIdentifier == 0 {
+		omci.DeviceIdentifier = BaselineIdent // Allow uninitialized device identifier
+	}
+	if omci.DeviceIdentifier == BaselineIdent {
+		if omci.Length == 0 {
+			omci.Length = MaxBaselineLength - 8 // Allow uninitialized length
+		} else if omci.Length != MaxBaselineLength-8 {
+			msg := fmt.Sprintf("invalid Baseline message length: %v", omci.Length)
+			return errors.New(msg)
+		}
+	} else if omci.DeviceIdentifier == ExtendedIdent {
+		if omci.Length == 0 {
+			omci.Length = uint16(len(bytes) - 10) // Allow uninitialized length
+		}
+		if omci.Length > MaxExtendedLength {
+			msg := fmt.Sprintf("invalid Baseline message length: %v", omci.Length)
+			return errors.New(msg)
+		}
+	} else {
+		msg := fmt.Sprintf("invalid device identifier: %#x, Baseline or Extended expected",
+			omci.DeviceIdentifier)
+		return errors.New(msg)
+	}
+	binary.BigEndian.PutUint16(bytes, omci.TransactionID)
+	bytes[2] = byte(omci.MessageType)
+	bytes[3] = byte(omci.DeviceIdentifier)
+	b.PushLayer(LayerTypeOMCI)
+
+	bufLen := len(b.Bytes())
+	padSize := int(omci.Length) - bufLen + 4
+	if padSize < 0 {
+		msg := fmt.Sprintf("invalid OMCI Message Type length, exceeded allowed frame size by %d bytes",
+			-padSize)
+		return errors.New(msg)
+	}
+	padding, err := b.AppendBytes(padSize)
+	if err != nil {
+		return err
+	}
+	copy(padding, lotsOfZeros[:])
+
+	if omci.DeviceIdentifier == BaselineIdent {
+		// For baseline, always provide the length
+		binary.BigEndian.PutUint32(b.Bytes()[MaxBaselineLength-8:], 40)
+	}
+	if opts.ComputeChecksums {
+		micBytes, err := b.AppendBytes(4)
+		if err != nil {
+			return err
+		}
+		omci.MIC, _ = calculateMicAes128(bytes[:MaxBaselineLength-4])
+		binary.BigEndian.PutUint32(micBytes, omci.MIC)
+	}
+	return nil
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [MaxExtendedLength]byte // Extended OMCI messages may be up to 1980 bytes long, including headers
diff --git a/vendor/github.com/deckarep/golang-set/.gitignore b/vendor/github.com/deckarep/golang-set/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/deckarep/golang-set/.travis.yml b/vendor/github.com/deckarep/golang-set/.travis.yml
new file mode 100644
index 0000000..c760d24
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+    - 1.8
+    - 1.9
+    - tip
+
+script:
+    - go test -race ./...
+    - go test -bench=.
+
diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE
new file mode 100644
index 0000000..b5768f8
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/LICENSE
@@ -0,0 +1,22 @@
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md
new file mode 100644
index 0000000..c3b50b2
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/README.md
@@ -0,0 +1,95 @@
+[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set)
+[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set)
+[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set)
+
+## golang-set
+
+
+The missing set collection for the Go language.  Until Go has sets built-in...use this.
+
+Coming from Python, one of the things I miss is the superbly wonderful set collection.  This is my attempt to mimic the primary features of the set from Python.
+You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library.  To those I say: simply ignore this repository
+and carry on. To the rest who find this useful, please contribute to making it better by:
+
+* Helping to make more idiomatic improvements to the code.
+* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
+* Helping to make the unit-tests more robust and kick-ass.
+* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
+* Simply offering feedback and suggestions.  (Positive, constructive feedback is appreciated.)
+
+I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
+
+*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework.  This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
+
+## Features (as of 9/22/2014)
+
+* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
+
+## Features (as of 9/15/2014)
+
+* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
+
+## Features (as of 4/22/2014)
+
+* One common interface to both implementations
+* Two set implementations to choose from
+  * a thread-safe implementation designed for concurrent use
+  * a non-thread-safe implementation designed for performance
+* 75 benchmarks for both implementations
+* 35 unit tests for both implementations
+* 14 concurrent tests for the thread-safe implementation
+
+
+
+Please see the unit test file for additional usage examples.  The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html)    Please keep in mind
+however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
+
+## Examples (not exhaustive)
+
+```go
+requiredClasses := mapset.NewSet()
+requiredClasses.Add("Cooking")
+requiredClasses.Add("English")
+requiredClasses.Add("Math")
+requiredClasses.Add("Biology")
+
+scienceSlice := []interface{}{"Biology", "Chemistry"}
+scienceClasses := mapset.NewSetFromSlice(scienceSlice)
+
+electiveClasses := mapset.NewSet()
+electiveClasses.Add("Welding")
+electiveClasses.Add("Music")
+electiveClasses.Add("Automotive")
+
+bonusClasses := mapset.NewSet()
+bonusClasses.Add("Go Programming")
+bonusClasses.Add("Python Programming")
+
+//Show me all the available classes I can take
+allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
+
+
+//Is cooking considered a science class?
+fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+//Show me all classes that are not science classes, since I hate science.
+fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
+
+//Which science classes are also required classes?
+fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+//How many bonus classes do you offer?
+fmt.Println(bonusClasses.Cardinality()) //2
+
+//Do you have the following classes? Welding, Automotive and English?
+fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
+```
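+
+The `Iterator` type (see `iterator.go`) supports early termination of iteration; a small sketch reusing the sets above:
+
+```go
+it := requiredClasses.Iterator()
+for elem := range it.C {
+	if elem == "Math" {
+		it.Stop() // signals the producer goroutine; any remaining elements are drained
+		break
+	}
+}
+```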
+
+Thanks!
+
+-Ralph
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
+[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)
diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go
new file mode 100644
index 0000000..9dfecad
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/iterator.go
@@ -0,0 +1,58 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+// Iterator defines an iterator over a Set; its C channel can be used to range over the Set's
+// elements.
+type Iterator struct {
+	C    <-chan interface{}
+	stop chan struct{}
+}
+
+// Stop stops the Iterator; no further elements will be received on C, and C will be closed.
+func (i *Iterator) Stop() {
+	// Allows for Stop() to be called multiple times
+	// (close() panics when called on already closed channel)
+	defer func() {
+		recover()
+	}()
+
+	close(i.stop)
+
+	// Exhaust any remaining elements.
+	for range i.C {
+	}
+}
+
+// newIterator returns a new Iterator instance together with its item and stop channels.
+func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) {
+	itemChan := make(chan interface{})
+	stopChan := make(chan struct{})
+	return &Iterator{
+		C:    itemChan,
+		stop: stopChan,
+	}, itemChan, stopChan
+}
diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go
new file mode 100644
index 0000000..29eb2e5
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/set.go
@@ -0,0 +1,217 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+// Package mapset implements a simple and generic set collection.
+// Items stored within it are unordered and unique. It supports
+// typical set operations: membership testing, intersection, union,
+// difference, symmetric difference and cloning.
+//
+// Package mapset provides two implementations of the Set
+// interface. The default implementation is safe for concurrent
+// access, but a non-thread-safe implementation is also provided for
+// programs that can benefit from the slight speed improvement and
+// that can enforce mutual exclusion through other means.
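+//
+// A minimal sketch of typical use (values are illustrative only):
+//
+//	s := mapset.NewSet()
+//	s.Add("a")
+//	s.Add("b")
+//	fmt.Println(s.Contains("a", "b")) // true
+//	fmt.Println(s.Cardinality())      // 2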
+package mapset
+
+// Set is the primary interface provided by the mapset package.  It
+// represents an unordered set of data and a large number of
+// operations that can be applied to that set.
+type Set interface {
+	// Adds an element to the set. Returns whether
+	// the item was added.
+	Add(i interface{}) bool
+
+	// Returns the number of elements in the set.
+	Cardinality() int
+
+	// Removes all elements from the set, leaving
+	// the empty set.
+	Clear()
+
+	// Returns a clone of the set using the same
+	// implementation, duplicating all keys.
+	Clone() Set
+
+	// Returns whether the given items
+	// are all in the set.
+	Contains(i ...interface{}) bool
+
+	// Returns the difference between this set
+	// and other. The returned set will contain
+	// all elements of this set that are not also
+	// elements of other.
+	//
+	// Note that the argument to Difference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Difference will
+	// panic.
+	Difference(other Set) Set
+
+	// Determines if two sets are equal to each
+	// other. If they have the same cardinality
+	// and contain the same elements, they are
+	// considered equal. The order in which
+	// the elements were added is irrelevant.
+	//
+	// Note that the argument to Equal must be
+	// of the same type as the receiver of the
+	// method. Otherwise, Equal will panic.
+	Equal(other Set) bool
+
+	// Returns a new set containing only the elements
+	// that exist in both sets.
+	//
+	// Note that the argument to Intersect
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Intersect will
+	// panic.
+	Intersect(other Set) Set
+
+	// Determines if every element in this set is in
+	// the other set but the two sets are not equal.
+	//
+	// Note that the argument to IsProperSubset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsProperSubset
+	// will panic.
+	IsProperSubset(other Set) bool
+
+	// Determines if every element in the other set
+	// is in this set but the two sets are not
+	// equal.
+	//
+	// Note that the argument to IsSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSuperset will
+	// panic.
+	IsProperSuperset(other Set) bool
+
+	// Determines if every element in this set is in
+	// the other set.
+	//
+	// Note that the argument to IsSubset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSubset will
+	// panic.
+	IsSubset(other Set) bool
+
+	// Determines if every element in the other set
+	// is in this set.
+	//
+	// Note that the argument to IsSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSuperset will
+	// panic.
+	IsSuperset(other Set) bool
+
+	// Iterates over elements and executes the passed func against each element.
+	// If the passed func returns true, iteration stops immediately.
+	Each(func(interface{}) bool)
+
+	// Returns a channel of elements that you can
+	// range over.
+	Iter() <-chan interface{}
+
+	// Returns an Iterator object that you can
+	// use to range over the set.
+	Iterator() *Iterator
+
+	// Remove a single element from the set.
+	Remove(i interface{})
+
+	// Provides a convenient string representation
+	// of the current state of the set.
+	String() string
+
+	// Returns a new set with all elements which are
+	// in either this set or the other set but not in both.
+	//
+	// Note that the argument to SymmetricDifference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, SymmetricDifference
+	// will panic.
+	SymmetricDifference(other Set) Set
+
+	// Returns a new set with all elements in both sets.
+	//
+	// Note that the argument to Union must be of the
+	// same type as the receiver of the method.
+	// Otherwise, Union will panic.
+	Union(other Set) Set
+
+	// Pop removes and returns an arbitrary item from the set.
+	Pop() interface{}
+
+	// Returns all subsets of a given set (Power Set).
+	PowerSet() Set
+
+	// Returns the Cartesian Product of two sets.
+	CartesianProduct(other Set) Set
+
+	// Returns the members of the set as a slice.
+	ToSlice() []interface{}
+}
+
+// NewSet creates and returns a reference to an empty set.  Operations
+// on the resulting set are thread-safe.
+func NewSet(s ...interface{}) Set {
+	set := newThreadSafeSet()
+	for _, item := range s {
+		set.Add(item)
+	}
+	return &set
+}
+
+// NewSetWith creates and returns a new set with the given elements.
+// Operations on the resulting set are thread-safe.
+func NewSetWith(elts ...interface{}) Set {
+	return NewSetFromSlice(elts)
+}
+
+// NewSetFromSlice creates and returns a reference to a set from an
+// existing slice.  Operations on the resulting set are thread-safe.
+func NewSetFromSlice(s []interface{}) Set {
+	a := NewSet(s...)
+	return a
+}
+
+// NewThreadUnsafeSet creates and returns a reference to an empty set.
+// Operations on the resulting set are not thread-safe.
+func NewThreadUnsafeSet() Set {
+	set := newThreadUnsafeSet()
+	return &set
+}
+
+// NewThreadUnsafeSetFromSlice creates and returns a reference to a
+// set from an existing slice.  Operations on the resulting set are
+// not thread-safe.
+func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
+	a := NewThreadUnsafeSet()
+	for _, item := range s {
+		a.Add(item)
+	}
+	return a
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go
new file mode 100644
index 0000000..269b4ab
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadsafe.go
@@ -0,0 +1,283 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import "sync"
+
+type threadSafeSet struct {
+	s threadUnsafeSet
+	sync.RWMutex
+}
+
+func newThreadSafeSet() threadSafeSet {
+	return threadSafeSet{s: newThreadUnsafeSet()}
+}
+
+func (set *threadSafeSet) Add(i interface{}) bool {
+	set.Lock()
+	ret := set.s.Add(i)
+	set.Unlock()
+	return ret
+}
+
+func (set *threadSafeSet) Contains(i ...interface{}) bool {
+	set.RLock()
+	ret := set.s.Contains(i...)
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) IsSubset(other Set) bool {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	ret := set.s.IsSubset(&o.s)
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) IsProperSubset(other Set) bool {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	defer set.RUnlock()
+	o.RLock()
+	defer o.RUnlock()
+
+	return set.s.IsProperSubset(&o.s)
+}
+
+func (set *threadSafeSet) IsSuperset(other Set) bool {
+	return other.IsSubset(set)
+}
+
+func (set *threadSafeSet) IsProperSuperset(other Set) bool {
+	return other.IsProperSubset(set)
+}
+
+func (set *threadSafeSet) Union(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeUnion}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Intersect(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeIntersection}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Difference(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeDifference}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) SymmetricDifference(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeDifference}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Clear() {
+	set.Lock()
+	set.s = newThreadUnsafeSet()
+	set.Unlock()
+}
+
+func (set *threadSafeSet) Remove(i interface{}) {
+	set.Lock()
+	delete(set.s, i)
+	set.Unlock()
+}
+
+func (set *threadSafeSet) Cardinality() int {
+	set.RLock()
+	defer set.RUnlock()
+	return len(set.s)
+}
+
+func (set *threadSafeSet) Each(cb func(interface{}) bool) {
+	set.RLock()
+	for elem := range set.s {
+		if cb(elem) {
+			break
+		}
+	}
+	set.RUnlock()
+}
+
+func (set *threadSafeSet) Iter() <-chan interface{} {
+	ch := make(chan interface{})
+	go func() {
+		set.RLock()
+
+		for elem := range set.s {
+			ch <- elem
+		}
+		close(ch)
+		set.RUnlock()
+	}()
+
+	return ch
+}
+
+func (set *threadSafeSet) Iterator() *Iterator {
+	iterator, ch, stopCh := newIterator()
+
+	go func() {
+		set.RLock()
+	L:
+		for elem := range set.s {
+			select {
+			case <-stopCh:
+				break L
+			case ch <- elem:
+			}
+		}
+		close(ch)
+		set.RUnlock()
+	}()
+
+	return iterator
+}
+
+func (set *threadSafeSet) Equal(other Set) bool {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	ret := set.s.Equal(&o.s)
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Clone() Set {
+	set.RLock()
+
+	unsafeClone := set.s.Clone().(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeClone}
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) String() string {
+	set.RLock()
+	ret := set.s.String()
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) PowerSet() Set {
+	set.RLock()
+	unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet)
+	set.RUnlock()
+
+	ret := &threadSafeSet{s: newThreadUnsafeSet()}
+	for subset := range unsafePowerSet.Iter() {
+		unsafeSubset := subset.(*threadUnsafeSet)
+		ret.Add(&threadSafeSet{s: *unsafeSubset})
+	}
+	return ret
+}
+
+func (set *threadSafeSet) Pop() interface{} {
+	set.Lock()
+	defer set.Unlock()
+	return set.s.Pop()
+}
+
+func (set *threadSafeSet) CartesianProduct(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeCartProduct}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) ToSlice() []interface{} {
+	keys := make([]interface{}, 0, set.Cardinality())
+	set.RLock()
+	for elem := range set.s {
+		keys = append(keys, elem)
+	}
+	set.RUnlock()
+	return keys
+}
+
+func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
+	set.RLock()
+	b, err := set.s.MarshalJSON()
+	set.RUnlock()
+
+	return b, err
+}
+
+func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
+	set.Lock()
+	err := set.s.UnmarshalJSON(p)
+	set.Unlock()
+
+	return err
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go
new file mode 100644
index 0000000..10bdd46
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go
@@ -0,0 +1,337 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type threadUnsafeSet map[interface{}]struct{}
+
+// An OrderedPair represents a 2-tuple of values.
+type OrderedPair struct {
+	First  interface{}
+	Second interface{}
+}
+
+func newThreadUnsafeSet() threadUnsafeSet {
+	return make(threadUnsafeSet)
+}
+
+// Equal says whether two 2-tuples contain the same values in the same order.
+func (pair *OrderedPair) Equal(other OrderedPair) bool {
+	if pair.First == other.First &&
+		pair.Second == other.Second {
+		return true
+	}
+
+	return false
+}
+
+func (set *threadUnsafeSet) Add(i interface{}) bool {
+	_, found := (*set)[i]
+	if found {
+		return false //False if it existed already
+	}
+
+	(*set)[i] = struct{}{}
+	return true
+}
+
+func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
+	for _, val := range i {
+		if _, ok := (*set)[val]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) IsSubset(other Set) bool {
+	_ = other.(*threadUnsafeSet)
+	for elem := range *set {
+		if !other.Contains(elem) {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) IsProperSubset(other Set) bool {
+	return set.IsSubset(other) && !set.Equal(other)
+}
+
+func (set *threadUnsafeSet) IsSuperset(other Set) bool {
+	return other.IsSubset(set)
+}
+
+func (set *threadUnsafeSet) IsProperSuperset(other Set) bool {
+	return set.IsSuperset(other) && !set.Equal(other)
+}
+
+func (set *threadUnsafeSet) Union(other Set) Set {
+	o := other.(*threadUnsafeSet)
+
+	unionedSet := newThreadUnsafeSet()
+
+	for elem := range *set {
+		unionedSet.Add(elem)
+	}
+	for elem := range *o {
+		unionedSet.Add(elem)
+	}
+	return &unionedSet
+}
+
+func (set *threadUnsafeSet) Intersect(other Set) Set {
+	o := other.(*threadUnsafeSet)
+
+	intersection := newThreadUnsafeSet()
+	// loop over smaller set
+	if set.Cardinality() < other.Cardinality() {
+		for elem := range *set {
+			if other.Contains(elem) {
+				intersection.Add(elem)
+			}
+		}
+	} else {
+		for elem := range *o {
+			if set.Contains(elem) {
+				intersection.Add(elem)
+			}
+		}
+	}
+	return &intersection
+}
+
+func (set *threadUnsafeSet) Difference(other Set) Set {
+	_ = other.(*threadUnsafeSet)
+
+	difference := newThreadUnsafeSet()
+	for elem := range *set {
+		if !other.Contains(elem) {
+			difference.Add(elem)
+		}
+	}
+	return &difference
+}
+
+func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
+	_ = other.(*threadUnsafeSet)
+
+	aDiff := set.Difference(other)
+	bDiff := other.Difference(set)
+	return aDiff.Union(bDiff)
+}
+
+func (set *threadUnsafeSet) Clear() {
+	*set = newThreadUnsafeSet()
+}
+
+func (set *threadUnsafeSet) Remove(i interface{}) {
+	delete(*set, i)
+}
+
+func (set *threadUnsafeSet) Cardinality() int {
+	return len(*set)
+}
+
+func (set *threadUnsafeSet) Each(cb func(interface{}) bool) {
+	for elem := range *set {
+		if cb(elem) {
+			break
+		}
+	}
+}
+
+func (set *threadUnsafeSet) Iter() <-chan interface{} {
+	ch := make(chan interface{})
+	go func() {
+		for elem := range *set {
+			ch <- elem
+		}
+		close(ch)
+	}()
+
+	return ch
+}
+
+func (set *threadUnsafeSet) Iterator() *Iterator {
+	iterator, ch, stopCh := newIterator()
+
+	go func() {
+	L:
+		for elem := range *set {
+			select {
+			case <-stopCh:
+				break L
+			case ch <- elem:
+			}
+		}
+		close(ch)
+	}()
+
+	return iterator
+}
+
+func (set *threadUnsafeSet) Equal(other Set) bool {
+	_ = other.(*threadUnsafeSet)
+
+	if set.Cardinality() != other.Cardinality() {
+		return false
+	}
+	for elem := range *set {
+		if !other.Contains(elem) {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) Clone() Set {
+	clonedSet := newThreadUnsafeSet()
+	for elem := range *set {
+		clonedSet.Add(elem)
+	}
+	return &clonedSet
+}
+
+func (set *threadUnsafeSet) String() string {
+	items := make([]string, 0, len(*set))
+
+	for elem := range *set {
+		items = append(items, fmt.Sprintf("%v", elem))
+	}
+	return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
+}
+
+// String outputs a 2-tuple in the form "(A, B)".
+func (pair OrderedPair) String() string {
+	return fmt.Sprintf("(%v, %v)", pair.First, pair.Second)
+}
+
+func (set *threadUnsafeSet) Pop() interface{} {
+	for item := range *set {
+		delete(*set, item)
+		return item
+	}
+	return nil
+}
+
+func (set *threadUnsafeSet) PowerSet() Set {
+	powSet := NewThreadUnsafeSet()
+	nullset := newThreadUnsafeSet()
+	powSet.Add(&nullset)
+
+	for es := range *set {
+		u := newThreadUnsafeSet()
+		j := powSet.Iter()
+		for er := range j {
+			p := newThreadUnsafeSet()
+			if reflect.TypeOf(er).Name() == "" {
+				k := er.(*threadUnsafeSet)
+				for ek := range *(k) {
+					p.Add(ek)
+				}
+			} else {
+				p.Add(er)
+			}
+			p.Add(es)
+			u.Add(&p)
+		}
+
+		powSet = powSet.Union(&u)
+	}
+
+	return powSet
+}
+
+func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
+	o := other.(*threadUnsafeSet)
+	cartProduct := NewThreadUnsafeSet()
+
+	for i := range *set {
+		for j := range *o {
+			elem := OrderedPair{First: i, Second: j}
+			cartProduct.Add(elem)
+		}
+	}
+
+	return cartProduct
+}
+
+func (set *threadUnsafeSet) ToSlice() []interface{} {
+	keys := make([]interface{}, 0, set.Cardinality())
+	for elem := range *set {
+		keys = append(keys, elem)
+	}
+
+	return keys
+}
+
+// MarshalJSON creates a JSON array from the set, it marshals all elements
+func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) {
+	items := make([]string, 0, set.Cardinality())
+
+	for elem := range *set {
+		b, err := json.Marshal(elem)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, string(b))
+	}
+
+	return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
+}
+
+// UnmarshalJSON recreates a set from a JSON array, it only decodes
+// primitive types. Numbers are decoded as json.Number.
+func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error {
+	var i []interface{}
+
+	d := json.NewDecoder(bytes.NewReader(b))
+	d.UseNumber()
+	err := d.Decode(&i)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range i {
+		switch t := v.(type) {
+		case []interface{}, map[string]interface{}:
+			continue
+		default:
+			set.Add(t)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000..e256a31
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000..0e9d6ed
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+script:
+  - go test
+  - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000..0200f75
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+	exampleKey: !!binary gIGC
+
+GOOD:
+	exampleKey: gIGC
+... and decode the base64 data in your code.
+```
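+
+One possible shape for that custom decoding is sketched below; the `Secret` and `example` types and their field names are hypothetical, not part of this library:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+// Secret is a hypothetical type whose YAML/JSON representation is a plain
+// base64 string (the GOOD form above).
+type Secret struct {
+	Data []byte
+}
+
+// UnmarshalJSON decodes the base64 payload itself, so YAML and JSON input
+// are handled identically.
+func (s *Secret) UnmarshalJSON(b []byte) error {
+	var encoded string
+	if err := json.Unmarshal(b, &encoded); err != nil {
+		return err
+	}
+	decoded, err := base64.StdEncoding.DecodeString(encoded)
+	if err != nil {
+		return err
+	}
+	s.Data = decoded
+	return nil
+}
+
+type example struct {
+	ExampleKey Secret `json:"exampleKey"`
+}
+
+func main() {
+	var e example
+	if err := yaml.Unmarshal([]byte("exampleKey: gIGC"), &e); err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(e.ExampleKey.Data) // [128 129 130]
+}
+```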
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well, since you can't unmarshal map keys anyway (struct fields can't be keys).
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000..5860074
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
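+
+// The exampleInner/exampleOuter pair below is an illustrative sketch added
+// for documentation only (it is not part of the upstream library). It shows
+// the embedding rule that dominantField encodes: the shallower, json-tagged
+// exampleOuter.Name dominates the promoted exampleInner.Name.
+type exampleInner struct {
+	Name string
+}
+
+type exampleOuter struct {
+	exampleInner
+	Name string `json:"name"` // shallower and explicitly tagged, so it wins
+}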
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
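+
+// exampleCachedFields is an illustrative sketch (not part of the upstream
+// library): the first call for a type computes its field list via
+// reflection; later calls for the same type are served from fieldCache.
+func exampleCachedFields(v interface{}) []field {
+	return cachedTypeFields(reflect.TypeOf(v))
+}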
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
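+
+// exampleFoldFunc is an illustrative sketch (not part of the upstream
+// library): for an all-letter key with no 'k' or 's', foldFunc returns
+// simpleLetterEqualFold, which matches the key case-insensitively.
+func exampleFoldFunc() bool {
+	eq := foldFunc([]byte("HTTPPort"))
+	return eq([]byte("HTTPPort"), []byte("httpport")) // true
+}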
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular optionName flag. optionName must be surrounded
+// by a string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
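+
+// exampleParseTag is an illustrative sketch (not part of the upstream
+// library) of how parseTag and tagOptions.Contains cooperate: for a struct
+// tag value such as "name,omitempty,string", parseTag yields the name and
+// the trailing options, which Contains can then query.
+func exampleParseTag() (string, bool, bool) {
+	name, opts := parseTag("name,omitempty,string")
+	return name, opts.Contains("omitempty"), opts.Contains("string")
+}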
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000..4fb4054
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON, then converts the JSON to YAML
+// and returns the YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// Unmarshal converts YAML to JSON, then uses JSON to unmarshal into the object.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
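+
+// exampleRoundTrip is an illustrative sketch (not part of the upstream
+// library) of the Marshal/Unmarshal pair: the value is encoded through JSON
+// semantics (so `json` struct tags apply) and emitted as YAML, then decoded
+// back along the reverse path.
+func exampleRoundTrip() error {
+	type T struct {
+		Name string `json:"name"`
+	}
+	y, err := Marshal(T{Name: "example"})
+	if err != nil {
+		return err
+	}
+	var out T
+	return Unmarshal(y, &out)
+}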
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshalling to interface{}; it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, passing
+// JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
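+
+// exampleYAMLToJSON is an illustrative sketch (not part of the upstream
+// library): non-string YAML map keys are converted to JSON string keys, so
+// the input below becomes {"1":"one","2":"two"}.
+func exampleYAMLToJSON() ([]byte, error) {
+	return YAMLToJSON([]byte("1: one\n2: two\n"))
+}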
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce. Otherwise return it unchanged.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml only returns an int64 key on a 32-bit system
+				// architecture when the key's value overflows a 32-bit int
+				// but fits in 64 bits. Otherwise the key type is simply int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Stolen from go-yaml to use the same conversion to string as
+				// the go-yaml library uses to convert float to string when
+				// Marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+						jtf := t.Field(f.index[0])
+						strMap[keyString], err = convertToJSONableObject(v, &jtf)
+						if err != nil {
+							return nil, err
+						}
+						continue
+					}
+				} else if t.Kind() == reflect.Map {
+					// Create a zero value of the map's element type to use as
+					// the JSON target.
+					jtv := reflect.Zero(t.Type().Elem())
+					strMap[keyString], err = convertToJSONableObject(v, &jtv)
+					if err != nil {
+						return nil, err
+					}
+					continue
+				}
+			}
+			strMap[keyString], err = convertToJSONableObject(v, nil)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return strMap, nil
+	case []interface{}:
+		// We need to recurse into arrays in case there are any
+		// map[interface{}]interface{}'s inside and to convert any
+		// numbers to strings.
+
+		// If jsonTarget is a slice (which it really should be), find the
+		// thing it's going to map to. If it's not a slice, just pass nil
+		// - JSON conversion will error for us if it's a real issue.
+		var jsonSliceElemValue *reflect.Value
+		if jsonTarget != nil {
+			t := *jsonTarget
+			if t.Kind() == reflect.Slice {
+				// By default slices point to nil, but we need a reflect.Value
+				// pointing to a value of the slice type, so we create one here.
+				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+				jsonSliceElemValue = &ev
+			}
+		}
+
+		// Make and use a new array.
+		arr := make([]interface{}, len(typedYAMLObj))
+		for i, v := range typedYAMLObj {
+			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arr, nil
+	default:
+		// If the target type is a string and the YAML type is a number,
+		// convert the YAML type to a string.
+		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+			// Based on my reading of go-yaml, it may return int, int64,
+			// float64, or uint64.
+			var s string
+			switch typedVal := typedYAMLObj.(type) {
+			case int:
+				s = strconv.FormatInt(int64(typedVal), 10)
+			case int64:
+				s = strconv.FormatInt(typedVal, 10)
+			case float64:
+				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+			case uint64:
+				s = strconv.FormatUint(typedVal, 10)
+			case bool:
+				if typedVal {
+					s = "true"
+				} else {
+					s = "false"
+				}
+			}
+			if len(s) > 0 {
+				yamlObj = interface{}(s)
+			}
+		}
+		return yamlObj, nil
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
new file mode 100644
index 0000000..387b4eb
--- /dev/null
+++ b/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+	https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+	Package glog implements logging analogous to the Google-internal
+	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+	Error, Fatal, plus formatting variants such as Infof. It
+	also provides V-style logging controlled by the -v and
+	-vmodule=file=2 flags.
+	
+	Basic examples:
+	
+		glog.Info("Prepare to repel boarders")
+	
+		glog.Fatalf("Initialization failed: %s", err)
+	
+	See the documentation for the V function for an explanation
+	of these examples:
+	
+		if glog.V(2) {
+			glog.Info("Starting transaction...")
+		}
+	
+		glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..54bd7af
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1180 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	glog.Info("Prepare to repel boarders")
+//
+//	glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if glog.V(2) {
+//		glog.Info("Starting transaction...")
+//	}
+//
+//	glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=false
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	infoLog severity = iota
+	warningLog
+	errorLog
+	fatalLog
+	numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+	return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+	atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+	return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+	return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+	var threshold severity
+	// Is it a known name?
+	if v, ok := severityByName(value); ok {
+		threshold = v
+	} else {
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		threshold = severity(v)
+	}
+	logging.stderrThreshold.set(threshold)
+	return nil
+}
+
+func severityByName(s string) (severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range severityName {
+		if name == s {
+			return severity(i), true
+		}
+	}
+	return 0, false
+}
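+
+// exampleSeveritySet is an illustrative sketch (not part of the upstream
+// library): the severity flag.Value accepts either a name or a number, so
+// both calls below set the same WARNING threshold for -stderrthreshold.
+func exampleSeveritySet() error {
+	var s severity
+	if err := s.Set("WARNING"); err != nil {
+		return err
+	}
+	return s.Set("1")
+}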
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+	lines int64
+	bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+	return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+	return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+	Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+	infoLog:    &Stats.Info,
+	warningLog: &Stats.Warning,
+	errorLog:   &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+	return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+	atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+	return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+	return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+	v, err := strconv.Atoi(value)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(Level(v), logging.vmodule.filter, false)
+	return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+	filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.Atoi(patLev[1])
+		if err != nil {
+			return errors.New("syntax error: expect comma-separated list of filename=N")
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
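+
+// exampleVmodule is an illustrative sketch (not part of the upstream
+// library) of the -vmodule syntax handled by moduleSpec.Set: level 3 for
+// every file matching gopher*, level 1 for recordio.
+func exampleVmodule() error {
+	var m moduleSpec
+	return m.Set("gopher*=3,recordio=1")
+}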
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
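+
+// exampleBacktraceAt is an illustrative sketch (not part of the upstream
+// library) of the -log_backtrace_at syntax handled by traceLocation.Set:
+// a file name (including the ".go" suffix) and a line number, joined by ':'.
+func exampleBacktraceAt() error {
+	var t traceLocation
+	return t.Set("gopherflakes.go:234")
+}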
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+func init() {
+	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flag.Var(&logging.verbosity, "v", "log level for V logs")
+	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+	// Default stderrThreshold is ERROR.
+	logging.stderrThreshold = errorLog
+
+	logging.setVState(0, nil, false)
+	go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severity // The -stderrthreshold flag.
+
+	// freeList is a list of byte buffers, maintained under freeListMu.
+	freeList *buffer
+	// freeListMu maintains the free list. It is separate from the main mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	freeListMu sync.Mutex
+
+	// mu protects the remaining elements of this structure and is
+	// used to synchronize logging.
+	mu sync.Mutex
+	// file holds writer for each of the log types.
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using sync.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+	L                A single character, representing the log level (eg 'I' for INFO)
+	mm               The month (zero padded; ie May is '05')
+	dd               The day (zero padded)
+	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
+	threadid         The space-padded thread ID as returned by GetTID()
+	file             The file name
+	line             The line number
+	msg              The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+	_, file, line, ok := runtime.Caller(3 + depth)
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+	now := timeNow()
+	if line < 0 {
+		line = 0 // not a real line number, but acceptable to someDigits
+	}
+	if s > fatalLog {
+		s = infoLog // for safety.
+	}
+	buf := l.getBuffer()
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.tmp[0] = severityChar[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.tmp[21] = ' '
+	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+	buf.tmp[29] = ' '
+	buf.Write(buf.tmp[:30])
+	buf.WriteString(file)
+	buf.tmp[0] = ':'
+	n := buf.someDigits(1, line)
+	buf.tmp[n+1] = ']'
+	buf.tmp[n+2] = ' '
+	buf.Write(buf.tmp[:n+3])
+	return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+	buf.tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.tmp)
+	for {
+		j--
+		buf.tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintln(buf, args...)
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+	l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+	buf, file, line := l.header(s, depth)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintf(buf, format, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.  If
+// alsoLogToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+	buf := l.formatHeader(s, file, line)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+	l.mu.Lock()
+	if l.traceLocation.isSet() {
+		if l.traceLocation.match(file, line) {
+			buf.Write(stacks(false))
+		}
+	}
+	data := buf.Bytes()
+	if !flag.Parsed() {
+		os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
+		os.Stderr.Write(data)
+	} else if l.toStderr {
+		os.Stderr.Write(data)
+	} else {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+		if l.file[s] == nil {
+			if err := l.createFiles(s); err != nil {
+				os.Stderr.Write(data) // Make sure the message appears somewhere.
+				l.exit(err)
+			}
+		}
+		switch s {
+		case fatalLog:
+			l.file[fatalLog].Write(data)
+			fallthrough
+		case errorLog:
+			l.file[errorLog].Write(data)
+			fallthrough
+		case warningLog:
+			l.file[warningLog].Write(data)
+			fallthrough
+		case infoLog:
+			l.file[infoLog].Write(data)
+		}
+	}
+	if s == fatalLog {
+		// If we got here via Exit rather than Fatal, print no stacks.
+		if atomic.LoadUint32(&fatalNoStacks) > 0 {
+			l.mu.Unlock()
+			timeoutFlush(10 * time.Second)
+			os.Exit(1)
+		}
+		// Dump all goroutine stacks before exiting.
+		// First, make sure we see the trace for the current goroutine on standard error.
+		// If -logtostderr has been specified, the loop below will do that anyway
+		// as the first stack in the full dump.
+		if !l.toStderr {
+			os.Stderr.Write(stacks(false))
+		}
+		// Write the stack trace for all goroutines to the files.
+		trace := stacks(true)
+		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+		for log := fatalLog; log >= infoLog; log-- {
+			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+				f.Write(trace)
+			}
+		}
+		l.mu.Unlock()
+		timeoutFlush(10 * time.Second)
+		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+	}
+	l.putBuffer(buf)
+	l.mu.Unlock()
+	if stats := severityStats[s]; stats != nil {
+		atomic.AddInt64(&stats.lines, 1)
+		atomic.AddInt64(&stats.bytes, int64(len(data)))
+	}
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+	}
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+	n := 10000
+	if all {
+		n = 100000
+	}
+	var trace []byte
+	for i := 0; i < 5; i++ {
+		trace = make([]byte, n)
+		nbytes := runtime.Stack(trace, all)
+		if nbytes < len(trace) {
+			return trace[:nbytes]
+		}
+		n *= 2
+	}
+	return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+	// If logExitFunc is set, we do that instead of exiting.
+	if logExitFunc != nil {
+		logExitFunc(err)
+		return
+	}
+	l.flushAll()
+	os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+	logger *loggingT
+	*bufio.Writer
+	file   *os.File
+	sev    severity
+	nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+	return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	if sb.nbytes+uint64(len(p)) >= MaxSize {
+		if err := sb.rotateFile(time.Now()); err != nil {
+			sb.logger.exit(err)
+		}
+	}
+	n, err = sb.Writer.Write(p)
+	sb.nbytes += uint64(n)
+	if err != nil {
+		sb.logger.exit(err)
+	}
+	return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+	if sb.file != nil {
+		sb.Flush()
+		sb.file.Close()
+	}
+	var err error
+	sb.file, _, err = create(severityName[sb.sev], now)
+	sb.nbytes = 0
+	if err != nil {
+		return err
+	}
+
+	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+	// Write header.
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+	n, err := sb.file.Write(buf.Bytes())
+	sb.nbytes += uint64(n)
+	return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+	now := time.Now()
+	// Files are created in decreasing severity order, so as soon as we find one
+	// has already been created, we can stop.
+	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+		sb := &syncBuffer{
+			logger: l,
+			sev:    s,
+		}
+		if err := sb.rotateFile(now); err != nil {
+			return err
+		}
+		l.file[s] = sb
+	}
+	return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+		l.lockAndFlushAll()
+	}
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+	l.mu.Lock()
+	l.flushAll()
+	l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+	// Flush from fatal down, in case there's trouble flushing.
+	for s := fatalLog; s >= infoLog; s-- {
+		file := l.file[s]
+		if file != nil {
+			file.Flush() // ignore error
+			file.Sync()  // ignore error
+		}
+	}
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities.  Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+	}
+	// Set a log format that captures the user's file and line:
+	//   d.go:23: message
+	stdLog.SetFlags(stdLog.Lshortfile)
+	stdLog.SetOutput(logBridge(sev))
+}
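+
+// exampleCopyStandardLog is an illustrative sketch (not part of the
+// upstream library): after CopyStandardLogTo("WARNING"), output from the
+// standard library's log package also reaches glog's WARNING and INFO logs.
+func exampleCopyStandardLog() {
+	CopyStandardLogTo("WARNING")
+	stdLog.Println("this line is also routed through glog")
+}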
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+	var (
+		file = "???"
+		line = 1
+		text string
+	)
+	// Split "d.go:23: message" into "d.go", "23", and "message".
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+	if v {
+		logging.println(infoLog, args...)
+	}
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+	if v {
+		logging.printf(infoLog, format, args...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+	logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+	logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+	logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+	logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+	logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+	logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+	logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+	logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+	logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+	logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printf(fatalLog, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+	if *logDir != "" {
+		logDirs = append(logDirs, *logDir)
+	}
+	logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+	pid      = os.Getpid()
+	program  = filepath.Base(os.Args[0])
+	host     = "unknownhost"
+	userName = "unknownuser"
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		host = shortHostname(h)
+	}
+
+	current, err := user.Current()
+	if err == nil {
+		userName = current.Username
+	}
+
+	// Sanitize userName since it may contain filepath separators on Windows.
+	userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+	if i := strings.Index(hostname, "."); i >= 0 {
+		return hostname[:i]
+	}
+	return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+		program,
+		host,
+		userName,
+		tag,
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		pid)
+	return name, program + "." + tag
+}
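+
+// For example (illustrative values, not part of the upstream source), a binary
+// named "server" run by user "alice" on host "www" at 14:30:01 on 2016-11-23
+// with pid 1234 yields names such as
+//
+//	server.www.alice.log.INFO.20161123-143001.1234
+//
+// along with the symlink name "server.INFO".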
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
new file mode 100644
index 0000000..ac7e51b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
+	r, err := gzip.NewReader(bytes.NewReader(gz))
+	if err != nil {
+		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+	}
+	defer r.Close()
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+	}
+
+	fd := new(protobuf.FileDescriptorProto)
+	if err := proto.Unmarshal(b, fd); err != nil {
+		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+	}
+
+	return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
+// describing the given message.
+func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
+	gz, path := msg.Descriptor()
+	fd, err := extractFile(gz)
+	if err != nil {
+		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+	}
+
+	md = fd.MessageType[path[0]]
+	for _, i := range path[1:] {
+		md = md.NestedType[i]
+	}
+	return fd, md
+}
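+
+// Illustrative usage (not part of the upstream source; pb.MyMessage stands in
+// for any generated message type):
+//
+//	fd, md := descriptor.ForMessage(&pb.MyMessage{})
+//	fmt.Printf("%s is defined in %s\n", md.GetName(), fd.GetName())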
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..e9cc202
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1284 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+const maxSecondsInDuration = 315576000000
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+	// Whether to render enum values as integers, as opposed to string values.
+	EnumsAsInts bool
+
+	// Whether to render fields with zero values.
+	EmitDefaults bool
+
+	// A string to indent each level by. The presence of this field will
+	// also cause a space to appear between the field separator and
+	// value, and for newlines to appear between fields and array
+	// elements.
+	Indent string
+
+	// Whether to use the original (.proto) name for fields.
+	OrigName bool
+
+	// A custom URL resolver to use when marshaling Any messages to JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeUrl string) (proto.Message, error)
+}
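+
+// A minimal custom resolver can be sketched as follows (illustrative, not part
+// of the upstream source; funcResolver and lookupMessage are assumed names):
+//
+//	type funcResolver func(typeUrl string) (proto.Message, error)
+//
+//	func (f funcResolver) Resolve(typeUrl string) (proto.Message, error) { return f(typeUrl) }
+//
+//	m := jsonpb.Marshaler{AnyResolver: funcResolver(lookupMessage)}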
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+	// Only the part of typeUrl after the last slash is relevant.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+	v := reflect.ValueOf(pb)
+	if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return errors.New("Marshal called with nil")
+	}
+	// Check for unset required fields first.
+	if err := checkRequiredFields(pb); err != nil {
+		return err
+	}
+	writer := &errWriter{writer: out}
+	return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to a JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+	var buf bytes.Buffer
+	if err := m.Marshal(&buf, pb); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
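+
+// Illustrative usage (not part of the upstream source; msg stands in for any
+// generated proto.Message value):
+//
+//	m := jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}
+//	s, err := m.MarshalToString(msg)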
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extension ids to ensure stable output.
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+	if jsm, ok := v.(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(m)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if m.Indent != "" {
+				b, err = json.MarshalIndent(js, indent, m.Indent)
+			} else {
+				b, err = json.Marshal(js)
+			}
+			if err != nil {
+				return err
+			}
+		}
+
+		out.write(string(b))
+		return out.err
+	}
+
+	s := reflect.ValueOf(v).Elem()
+
+	// Handle well-known types.
+	if wkt, ok := v.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			// "Wrappers use the same representation in JSON
+			//  as the wrapped primitive type, ..."
+			sprop := proto.GetProperties(s.Type())
+			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+		case "Any":
+			// Any is a bit more involved.
+			return m.marshalAny(out, v, indent)
+		case "Duration":
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+				return fmt.Errorf("seconds out of range %v", s)
+			}
+			if ns <= -secondInNanos || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+			}
+			if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+				return errors.New("signs of seconds and nanos do not match")
+			}
+			// Generated output always contains 0, 3, 6, or 9 fractional digits,
+			// depending on required precision, followed by the suffix "s".
+			f := "%d.%09d"
+			if ns < 0 {
+				ns = -ns
+				if s == 0 {
+					f = "-%d.%09d"
+				}
+			}
+			x := fmt.Sprintf(f, s, ns)
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`s"`)
+			return out.err
+		case "Struct", "ListValue":
+			// Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+		case "Timestamp":
+			// "RFC 3339, where generated output will always be Z-normalized
+			//  and uses 0, 3, 6 or 9 fractional digits."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns < 0 || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+			}
+			t := time.Unix(s, ns).UTC()
+			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+			x := t.Format("2006-01-02T15:04:05.000000000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`Z"`)
+			return out.err
+		case "Value":
+			// Value has a single oneof.
+			kind := s.Field(0)
+			if kind.IsNil() {
+				// "absence of any variant indicates an error"
+				return errors.New("nil Value")
+			}
+			// oneof -> *T -> T -> T.F
+			x := kind.Elem().Elem().Field(0)
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, x, indent)
+		}
+	}
+
+	out.write("{")
+	if m.Indent != "" {
+		out.write("\n")
+	}
+
+	firstField := true
+
+	if typeURL != "" {
+		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < s.NumField(); i++ {
+		value := s.Field(i)
+		valueField := s.Type().Field(i)
+		if strings.HasPrefix(valueField.Name, "XXX_") {
+			continue
+		}
+
+		// IsNil will panic on most value kinds.
+		switch value.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface:
+			if value.IsNil() {
+				continue
+			}
+		}
+
+		if !m.EmitDefaults {
+			switch value.Kind() {
+			case reflect.Bool:
+				if !value.Bool() {
+					continue
+				}
+			case reflect.Int32, reflect.Int64:
+				if value.Int() == 0 {
+					continue
+				}
+			case reflect.Uint32, reflect.Uint64:
+				if value.Uint() == 0 {
+					continue
+				}
+			case reflect.Float32, reflect.Float64:
+				if value.Float() == 0 {
+					continue
+				}
+			case reflect.String:
+				if value.Len() == 0 {
+					continue
+				}
+			case reflect.Map, reflect.Ptr, reflect.Slice:
+				if value.IsNil() {
+					continue
+				}
+			}
+		}
+
+		// Oneof fields need special handling.
+		if valueField.Tag.Get("protobuf_oneof") != "" {
+			// value is an interface containing &T{real_value}.
+			sv := value.Elem().Elem() // interface -> *T -> T
+			value = sv.Field(0)
+			valueField = sv.Type().Field(0)
+		}
+		prop := jsonProperties(valueField, m.OrigName)
+		if !firstField {
+			m.writeSep(out)
+		}
+		if err := m.marshalField(out, prop, value, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if ep, ok := v.(proto.Message); ok {
+		extensions := proto.RegisteredExtensions(v)
+		// Sort extensions for stable output.
+		ids := make([]int32, 0, len(extensions))
+		for id, desc := range extensions {
+			if !proto.HasExtension(ep, desc) {
+				continue
+			}
+			ids = append(ids, id)
+		}
+		sort.Sort(int32Slice(ids))
+		for _, id := range ids {
+			desc := extensions[id]
+			if desc == nil {
+				// unknown extension
+				continue
+			}
+			ext, extErr := proto.GetExtension(ep, desc)
+			if extErr != nil {
+				return extErr
+			}
+			value := reflect.ValueOf(ext)
+			var prop proto.Properties
+			prop.Parse(desc.Tag)
+			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+			if !firstField {
+				m.writeSep(out)
+			}
+			if err := m.marshalField(out, &prop, value, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+
+	}
+
+	if m.Indent != "" {
+		out.write("\n")
+		out.write(indent)
+	}
+	out.write("}")
+	return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+	if m.Indent != "" {
+		out.write(",\n")
+	} else {
+		out.write(",")
+	}
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	v := reflect.ValueOf(any).Elem()
+	turl := v.Field(0).String()
+	val := v.Field(1).Bytes()
+
+	var msg proto.Message
+	var err error
+	if m.AnyResolver != nil {
+		msg, err = m.AnyResolver.Resolve(turl)
+	} else {
+		msg, err = defaultResolveAny(turl)
+	}
+	if err != nil {
+		return err
+	}
+
+	if err := proto.Unmarshal(val, msg); err != nil {
+		return err
+	}
+
+	if _, ok := msg.(wkt); ok {
+		out.write("{")
+		if m.Indent != "" {
+			out.write("\n")
+		}
+		if err := m.marshalTypeURL(out, indent, turl); err != nil {
+			return err
+		}
+		m.writeSep(out)
+		if m.Indent != "" {
+			out.write(indent)
+			out.write(m.Indent)
+			out.write(`"value": `)
+		} else {
+			out.write(`"value":`)
+		}
+		if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+			return err
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+		}
+		out.write("}")
+		return out.err
+	}
+
+	return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"@type":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	out.write(string(b))
+	return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"`)
+	out.write(prop.JSONName)
+	out.write(`":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	if err := m.marshalValue(out, prop, v, indent); err != nil {
+		return err
+	}
+	return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	var err error
+	v = reflect.Indirect(v)
+
+	// Handle nil pointer
+	if v.Kind() == reflect.Invalid {
+		out.write("null")
+		return out.err
+	}
+
+	// Handle repeated elements.
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		out.write("[")
+		comma := ""
+		for i := 0; i < v.Len(); i++ {
+			sliceVal := v.Index(i)
+			out.write(comma)
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write("]")
+		return out.err
+	}
+
+	// Handle well-known types.
+	// Most are handled up in marshalObject (because 99% are messages).
+	if wkt, ok := v.Interface().(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "NullValue":
+			out.write("null")
+			return out.err
+		}
+	}
+
+	// Handle enumerations.
+	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
+		// value. Such values should _not_ be quoted or they will be interpreted
+		// as an enum string instead of their value.
+		enumStr := v.Interface().(fmt.Stringer).String()
+		var valStr string
+		if v.Kind() == reflect.Ptr {
+			valStr = strconv.Itoa(int(v.Elem().Int()))
+		} else {
+			valStr = strconv.Itoa(int(v.Int()))
+		}
+		isKnownEnum := enumStr != valStr
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		out.write(enumStr)
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		return out.err
+	}
+
+	// Handle nested messages.
+	if v.Kind() == reflect.Struct {
+		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+	}
+
+	// Handle maps.
+	// Since Go randomizes map iteration, we sort keys for stable output.
+	if v.Kind() == reflect.Map {
+		out.write(`{`)
+		keys := v.MapKeys()
+		sort.Sort(mapKeys(keys))
+		for i, k := range keys {
+			if i > 0 {
+				out.write(`,`)
+			}
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+
+			// TODO handle map key prop properly
+			b, err := json.Marshal(k.Interface())
+			if err != nil {
+				return err
+			}
+			s := string(b)
+
+			// If the JSON is not a string value, encode it again to make it one.
+			if !strings.HasPrefix(s, `"`) {
+				b, err := json.Marshal(s)
+				if err != nil {
+					return err
+				}
+				s = string(b)
+			}
+
+			out.write(s)
+			out.write(`:`)
+			if m.Indent != "" {
+				out.write(` `)
+			}
+
+			vprop := prop
+			if prop != nil && prop.MapValProp != nil {
+				vprop = prop.MapValProp
+			}
+			if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+				return err
+			}
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write(`}`)
+		return out.err
+	}
+
+	// Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		f := v.Float()
+		var sval string
+		switch {
+		case math.IsInf(f, 1):
+			sval = `"Infinity"`
+		case math.IsInf(f, -1):
+			sval = `"-Infinity"`
+		case math.IsNaN(f):
+			sval = `"NaN"`
+		}
+		if sval != "" {
+			out.write(sval)
+			return out.err
+		}
+	}
+
+	// Default handling defers to the encoding/json library.
+	b, err := json.Marshal(v.Interface())
+	if err != nil {
+		return err
+	}
+	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+	if needToQuote {
+		out.write(`"`)
+	}
+	out.write(string(b))
+	if needToQuote {
+		out.write(`"`)
+	}
+	return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// Whether to allow messages to contain unknown fields, as opposed to
+	// failing to unmarshal.
+	AllowUnknownFields bool
+
+	// A custom URL resolver to use when unmarshaling Any messages from JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode output produced with any
+// combination of the related Marshaler's options.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	inputValue := json.RawMessage{}
+	if err := dec.Decode(&inputValue); err != nil {
+		return err
+	}
+	if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+		return err
+	}
+	return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode output produced with any
+// combination of the related Marshaler's options.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+	dec := json.NewDecoder(r)
+	return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode output produced with any
+// combination of the related Marshaler's options.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode output produced with any
+// combination of the related Marshaler's options.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode output produced
+// with any combination of the related Marshaler's options.
+func UnmarshalString(str string, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
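+
+// Illustrative usage (not part of the upstream source; pb.MyMessage and its
+// "name" field stand in for any generated message):
+//
+//	var msg pb.MyMessage
+//	if err := jsonpb.UnmarshalString(`{"name": "example"}`, &msg); err != nil {
+//		// handle malformed input or unknown fields
+//	}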
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+	targetType := target.Type()
+
+	// Allocate memory for pointer fields.
+	if targetType.Kind() == reflect.Ptr {
+		// If input value is "null" and target is a pointer type, then the field should be treated as not set
+		// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+		_, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+		if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+			return nil
+		}
+		target.Set(reflect.New(targetType.Elem()))
+
+		return u.unmarshalValue(target.Elem(), inputValue, prop)
+	}
+
+	if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+	}
+
+	// Handle well-known types that are not pointers.
+	if w, ok := target.Addr().Interface().(wkt); ok {
+		switch w.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			return u.unmarshalValue(target.Field(0), inputValue, prop)
+		case "Any":
+			// Use json.RawMessage pointer type instead of value to support Go versions before 1.8.
+			// 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
+			// https://github.com/golang/go/issues/14493
+			var jsonFields map[string]*json.RawMessage
+			if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+				return err
+			}
+
+			val, ok := jsonFields["@type"]
+			if !ok || val == nil {
+				return errors.New("Any JSON doesn't have '@type'")
+			}
+
+			var turl string
+			if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+				return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+			}
+			target.Field(0).SetString(turl)
+
+			var m proto.Message
+			var err error
+			if u.AnyResolver != nil {
+				m, err = u.AnyResolver.Resolve(turl)
+			} else {
+				m, err = defaultResolveAny(turl)
+			}
+			if err != nil {
+				return err
+			}
+
+			if _, ok := m.(wkt); ok {
+				val, ok := jsonFields["value"]
+				if !ok {
+					return errors.New("Any JSON doesn't have 'value'")
+				}
+
+				if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			} else {
+				delete(jsonFields, "@type")
+				nestedProto, err := json.Marshal(jsonFields)
+				if err != nil {
+					return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+				}
+
+				if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			}
+
+			b, err := proto.Marshal(m)
+			if err != nil {
+				return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+			}
+			target.Field(1).SetBytes(b)
+
+			return nil
+		case "Duration":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			d, err := time.ParseDuration(unq)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			target.Field(0).SetInt(s)
+			target.Field(1).SetInt(ns)
+			return nil
+		case "Timestamp":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, unq)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+
+			target.Field(0).SetInt(t.Unix())
+			target.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Struct":
+			var m map[string]json.RawMessage
+			if err := json.Unmarshal(inputValue, &m); err != nil {
+				return fmt.Errorf("bad StructValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+			for k, jv := range m {
+				pv := &stpb.Value{}
+				if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+					return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+				}
+				target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+			}
+			return nil
+		case "ListValue":
+			var s []json.RawMessage
+			if err := json.Unmarshal(inputValue, &s); err != nil {
+				return fmt.Errorf("bad ListValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+			for i, sv := range s {
+				if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+					return err
+				}
+			}
+			return nil
+		case "Value":
+			ivStr := string(inputValue)
+			if ivStr == "null" {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+			} else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+			} else if v, err := unquote(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+			} else if v, err := strconv.ParseBool(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+			} else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+				lv := &stpb.ListValue{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+				return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+			} else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+				sv := &stpb.Struct{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+				return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+			} else {
+				return fmt.Errorf("unrecognized type for Value %q", ivStr)
+			}
+			return nil
+		}
+	}
+
+	// Handle enums, which have an underlying type of int32,
+	// and may appear as strings.
+	// The case of an enum appearing as a number is handled
+	// at the bottom of this function.
+	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+		vmap := proto.EnumValueMap(prop.Enum)
+		// Don't need to do unquoting; valid enum names
+		// are from a limited character set.
+		s := inputValue[1 : len(inputValue)-1]
+		n, ok := vmap[string(s)]
+		if !ok {
+			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+		}
+		if target.Kind() == reflect.Ptr { // proto2
+			target.Set(reflect.New(targetType.Elem()))
+			target = target.Elem()
+		}
+		if targetType.Kind() != reflect.Int32 {
+			return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+		}
+		target.SetInt(int64(n))
+		return nil
+	}
+
+	// Handle nested messages.
+	if targetType.Kind() == reflect.Struct {
+		var jsonFields map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+			return err
+		}
+
+		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+			// Be liberal in what names we accept; both orig_name and camelName are okay.
+			fieldNames := acceptedJSONFieldNames(prop)
+
+			vOrig, okOrig := jsonFields[fieldNames.orig]
+			vCamel, okCamel := jsonFields[fieldNames.camel]
+			if !okOrig && !okCamel {
+				return nil, false
+			}
+			// If, for some reason, both are present in the data, favour the camelName.
+			var raw json.RawMessage
+			if okOrig {
+				raw = vOrig
+				delete(jsonFields, fieldNames.orig)
+			}
+			if okCamel {
+				raw = vCamel
+				delete(jsonFields, fieldNames.camel)
+			}
+			return raw, true
+		}
+
+		sprops := proto.GetProperties(targetType)
+		for i := 0; i < target.NumField(); i++ {
+			ft := target.Type().Field(i)
+			if strings.HasPrefix(ft.Name, "XXX_") {
+				continue
+			}
+
+			valueForField, ok := consumeField(sprops.Prop[i])
+			if !ok {
+				continue
+			}
+
+			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+				return err
+			}
+		}
+		// Check for any oneof fields.
+		if len(jsonFields) > 0 {
+			for _, oop := range sprops.OneofTypes {
+				raw, ok := consumeField(oop.Prop)
+				if !ok {
+					continue
+				}
+				nv := reflect.New(oop.Type.Elem())
+				target.Field(oop.Field).Set(nv)
+				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+					return err
+				}
+			}
+		}
+		// Handle proto2 extensions.
+		if len(jsonFields) > 0 {
+			if ep, ok := target.Addr().Interface().(proto.Message); ok {
+				for _, ext := range proto.RegisteredExtensions(ep) {
+					name := fmt.Sprintf("[%s]", ext.Name)
+					raw, ok := jsonFields[name]
+					if !ok {
+						continue
+					}
+					delete(jsonFields, name)
+					nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+					if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+						return err
+					}
+					if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		if !u.AllowUnknownFields && len(jsonFields) > 0 {
+			// Pick any field to be the scapegoat.
+			var f string
+			for fname := range jsonFields {
+				f = fname
+				break
+			}
+			return fmt.Errorf("unknown field %q in %v", f, targetType)
+		}
+		return nil
+	}
+
+	// Handle arrays (which aren't encoded bytes)
+	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+		var slc []json.RawMessage
+		if err := json.Unmarshal(inputValue, &slc); err != nil {
+			return err
+		}
+		if slc != nil {
+			l := len(slc)
+			target.Set(reflect.MakeSlice(targetType, l, l))
+			for i := 0; i < l; i++ {
+				if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	// Handle maps (whose keys are always strings)
+	if targetType.Kind() == reflect.Map {
+		var mp map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &mp); err != nil {
+			return err
+		}
+		if mp != nil {
+			target.Set(reflect.MakeMap(targetType))
+			for ks, raw := range mp {
+				// Unmarshal map key. The core json library already decoded the key into a
+				// string, so we handle that specially. Other types were quoted post-serialization.
+				var k reflect.Value
+				if targetType.Key().Kind() == reflect.String {
+					k = reflect.ValueOf(ks)
+				} else {
+					k = reflect.New(targetType.Key()).Elem()
+					var kprop *proto.Properties
+					if prop != nil && prop.MapKeyProp != nil {
+						kprop = prop.MapKeyProp
+					}
+					if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+						return err
+					}
+				}
+
+				// Unmarshal map value.
+				v := reflect.New(targetType.Elem()).Elem()
+				var vprop *proto.Properties
+				if prop != nil && prop.MapValProp != nil {
+					vprop = prop.MapValProp
+				}
+				if err := u.unmarshalValue(v, raw, vprop); err != nil {
+					return err
+				}
+				target.SetMapIndex(k, v)
+			}
+		}
+		return nil
+	}
+
+	// Non-finite numbers can be encoded as strings.
+	isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isFloat {
+		if num, ok := nonFinite[string(inputValue)]; ok {
+			target.SetFloat(num)
+			return nil
+		}
+	}
+
+	// integers & floats can be encoded as strings. In this case we drop
+	// the quotes and proceed as normal.
+	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+		targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+		targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isNum && strings.HasPrefix(string(inputValue), `"`) {
+		inputValue = inputValue[1 : len(inputValue)-1]
+	}
+
+	// Use the encoding/json package to parse other value types.
+	return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+func unquote(s string) (string, error) {
+	var ret string
+	err := json.Unmarshal([]byte(s), &ret)
+	return ret, err
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+	var prop proto.Properties
+	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+	if origName || prop.JSONName == "" {
+		prop.JSONName = prop.OrigName
+	}
+	return &prop
+}
+
+type fieldNames struct {
+	orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+	if prop.JSONName != "" {
+		opts.camel = prop.JSONName
+	}
+	return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+	writer io.Writer
+	err    error
+}
+
+func (w *errWriter) write(str string) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	if k := s[i].Kind(); k == s[j].Kind() {
+		switch k {
+		case reflect.String:
+			return s[i].String() < s[j].String()
+		case reflect.Int32, reflect.Int64:
+			return s[i].Int() < s[j].Int()
+		case reflect.Uint32, reflect.Uint64:
+			return s[i].Uint() < s[j].Uint()
+		}
+	}
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal.  While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+	// Most well-known type messages do not contain required fields.  The "Any" type may contain
+	// a message that has required fields.
+	//
+	// When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
+	// field in order to transform that into JSON, and that should have returned an error if a
+	// required field is not set in the embedded message.
+	//
+	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+	// embedded message to store the serialized message in Any.Value field, and that should have
+	// returned an error if a required field is not set.
+	if _, ok := pb.(wkt); ok {
+		return nil
+	}
+
+	v := reflect.ValueOf(pb)
+	// Skip message if it is not a struct pointer.
+	if v.Kind() != reflect.Ptr {
+		return nil
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		sfield := v.Type().Field(i)
+
+		if sfield.PkgPath != "" {
+			// blank PkgPath means the field is exported; skip if not exported
+			continue
+		}
+
+		if strings.HasPrefix(sfield.Name, "XXX_") {
+			continue
+		}
+
+		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
+		// field, i.e. an interface containing &T{real_value}.
+		if sfield.Tag.Get("protobuf_oneof") != "" {
+			if field.Kind() != reflect.Interface {
+				continue
+			}
+			v := field.Elem()
+			if v.Kind() != reflect.Ptr || v.IsNil() {
+				continue
+			}
+			v = v.Elem()
+			if v.Kind() != reflect.Struct || v.NumField() < 1 {
+				continue
+			}
+			field = v.Field(0)
+			sfield = v.Type().Field(0)
+		}
+
+		protoTag := sfield.Tag.Get("protobuf")
+		if protoTag == "" {
+			continue
+		}
+		var prop proto.Properties
+		prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+		switch field.Kind() {
+		case reflect.Map:
+			if field.IsNil() {
+				continue
+			}
+			// Check each map value.
+			keys := field.MapKeys()
+			for _, k := range keys {
+				v := field.MapIndex(k)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Slice:
+			// Handle non-repeated type, e.g. bytes.
+			if !prop.Repeated {
+				if prop.Required && field.IsNil() {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+
+			// Handle repeated type.
+			if field.IsNil() {
+				continue
+			}
+			// Check each slice item.
+			for i := 0; i < field.Len(); i++ {
+				v := field.Index(i)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Ptr:
+			if field.IsNil() {
+				if prop.Required {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+			if err := checkRequiredFieldsInValue(field); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Handle proto2 extensions.
+	for _, ext := range proto.RegisteredExtensions(pb) {
+		if !proto.HasExtension(pb, ext) {
+			continue
+		}
+		ep, err := proto.GetExtension(pb, ext)
+		if err != nil {
+			return err
+		}
+		err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+	if pm, ok := v.Interface().(proto.Message); ok {
+		return checkRequiredFields(pm)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
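A minimal usage sketch for the Clone/Merge API in clone.go above; pb.Example, its Name and Tags fields, and the import path are hypothetical stand-ins for a generated message type.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	src := &pb.Example{Name: proto.String("alice"), Tags: []string{"a"}}

	// Clone returns a deep copy; mutating dup does not touch src.
	dup := proto.Clone(src).(*pb.Example)

	// Merge overwrites set scalar fields and appends repeated fields,
	// so dst ends up with Name "alice" and Tags ["b", "a"].
	dst := &pb.Example{Tags: []string{"b"}}
	proto.Merge(dst, src)

	fmt.Println(dup, dst)
}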
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
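The varint and zigzag decoders in decode.go can be exercised directly; a small worked example (byte values computed by hand, not taken from anything in this change).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 varint-encodes as 0xAC 0x02: low 7 bits first, MSB set on all but the last byte.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2

	// Zigzag maps signed to unsigned so small negatives stay small:
	// 0→0, -1→1, 1→2, -2→3, 2→4, -3→5. Wire varint 5 therefore decodes to -3.
	b := proto.NewBuffer([]byte{0x05})
	z, err := b.DecodeZigzag64()
	fmt.Println(int64(z), err) // -3 <nil>
}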
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							di.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						di.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to an interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to an interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
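DiscardUnknown in discard.go is normally called right after unmarshaling, when unknown-field bytes should not survive a later re-marshal; a sketch, again with a hypothetical generated type and import path.

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/gen/examplepb" // hypothetical generated package
)

func handle(wire []byte) *pb.Example {
	msg := &pb.Example{}
	if err := proto.Unmarshal(wire, msg); err != nil {
		log.Fatal(err)
	}
	// Without this call, fields the current schema does not know about stay in
	// XXX_unrecognized and are written back out by a later proto.Marshal.
	proto.DiscardUnknown(msg)
	return msg
}

func main() {
	_ = handle(nil) // empty input decodes to an empty message (assuming no required fields)
}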
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
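A worked counterpart for the encoders in encode.go (values computed by hand).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: low 7 bits 0x2C with the continuation bit set (0xAC), then 0x02.
	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
	fmt.Println(proto.SizeVarint(300))           // 2

	// Zigzag encoding keeps small negative numbers short: -3 is written as varint 5.
	b := proto.NewBuffer(nil)
	v := int64(-3)
	b.EncodeZigzag64(uint64(v))
	fmt.Printf("% x\n", b.Bytes()) // 05
}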
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f9b6e41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1 := extensionAsLegacyType(e1.value)
+		m2 := extensionAsLegacyType(e2.value)
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
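proto.Equal in equal.go compares messages field by field rather than by pointer identity or encoded bytes; a short sketch with the same hypothetical generated type as above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	a := &pb.Example{Name: proto.String("alice"), Tags: []string{"x", "y"}}
	b := &pb.Example{Name: proto.String("alice"), Tags: []string{"x", "y"}}
	c := &pb.Example{Name: proto.String("bob")}

	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields pairwise equal
	fmt.Println(proto.Equal(a, c)) // false: Name differs and Tags is set in only one
	fmt.Println(a == b)            // false: pointer identity is not what Equal checks
}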
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..fa88add
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc *ExtensionDesc
+
+	// value is a concrete value for the extension field. Let the type of
+	// desc.ExtensionType be the "API type" and the type of Extension.value
+	// be the "storage type". The API type and storage type are the same except:
+	//	* For scalars (except []byte), the API type uses *T,
+	//	while the storage type uses T.
+	//	* For repeated fields, the API type uses []T, while the storage type
+	//	uses *[]T.
+	//
+	// The reason for the divergence is that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
+	//
+	// The value may only be populated if desc is also populated.
+	value interface{}
+
+	// enc is the raw bytes for the extension field.
+	enc []byte
+}
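+
+// To make the API/storage distinction above concrete, here is an
+// illustrative (non-normative) mapping for a few field shapes:
+//
+//	proto field              API type (GetExtension)  storage type (Extension.value)
+//	optional int32  f = 1;   *int32                    int32
+//	optional bytes  b = 2;   []byte                    []byte
+//	repeated string s = 3;   []string                  *[]string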
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return extensionAsLegacyType(e.value), nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = extensionAsStorageType(v)
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return extensionAsLegacyType(e.value), nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default is defined for the extension, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it via SetInt, which accepts an int64.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+	return nil
+}
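+
+// A minimal usage sketch of the extension accessors (pb.MyBase and
+// pb.E_MyExtension are hypothetical generated identifiers, not part of this
+// package):
+//
+//	msg := &pb.MyBase{}
+//	if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_MyExtension) {
+//		v, err := proto.GetExtension(msg, pb.E_MyExtension)
+//		if err == nil {
+//			fmt.Println(*(v.(*string))) // prints "hello"
+//		}
+//	}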
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
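+
+// Generated code typically declares and registers an extension descriptor
+// roughly as in the following sketch (MyBase and E_MyExtension are
+// hypothetical generated names):
+//
+//	var E_MyExtension = &proto.ExtensionDesc{
+//		ExtendedType:  (*MyBase)(nil),
+//		ExtensionType: (*string)(nil),
+//		Field:         100,
+//		Name:          "example.my_extension",
+//		Tag:           "bytes,100,opt,name=my_extension",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_MyExtension) }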
+
+// extensionAsLegacyType converts a value from the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		// Represent primitive types as a pointer to the value.
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Slice:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	}
+	return v
+}
+
+// extensionAsStorageType converts a value from the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Ptr:
+		// Represent pointer-to-scalar types as the dereferenced value.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	case reflect.Slice:
+		// Represent slice types as a pointer to the value.
+		if rv.Type().Elem().Kind() != reflect.Uint8 {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			v = rv2.Interface()
+		}
+	}
+	return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return "proto: required field not set"
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or a InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error was absorbed:
+// it returns true for nil or non-fatal errors and false for fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
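+
+// A minimal sketch of deterministic marshaling with a Buffer (msg is assumed
+// to be any generated proto.Message):
+//
+//	var buf proto.Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := buf.Bytes() // equal messages marshal to identical bytes in this binary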
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
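+
+// Illustrative calls, using the FOO_value map from the package documentation
+// example above (both forms yield 17):
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic
+//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // numeric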
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
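+
+// A short usage sketch, reusing the Test message from the package
+// documentation example (its type field declares default=77):
+//
+//	msg := &pb.Test{Label: proto.String("hi")}
+//	proto.SetDefaults(msg)
+//	// msg.Type now points to 77; fields without declared defaults stay nil.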
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its default information:
+	// its scalar fields (with any proto-declared default values) and the
+	// indices of its nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
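+
+// Internal usage sketch: callers sort a map's reflected keys to get a
+// deterministic iteration order (m is assumed to be a reflect.Value of map kind):
+//
+//	keys := m.MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		_ = m.MapIndex(k) // visit entries in sorted key order
+//	}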
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..a4b8c0c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
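+
+// For example, put(3, 7) grows fastTags to [-1 -1 -1 7] so that get(3) is a plain
+// slice index, while put(100000, 7) falls through to the slowTags map because
+// 100000 exceeds tagMapFastLimit.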
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		log.Printf("proto: tag has too few fields: %q", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		log.Printf("proto: tag has unknown wire type: %q", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		}
+	}
+}
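+
+// For example, the tag "bytes,3,opt,name=foo,json=fooBar,def=hello, world" parses to
+// Wire "bytes", Tag 3, Optional, OrigName "foo", JSONName "fooBar" and Default
+// "hello, world" (the pieces after def= are re-joined, since commas are not escaped).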
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	switch t1 := typ; t1.Kind() {
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+
+	case reflect.Slice:
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+			p.stype = t2.Elem()
+		}
+
+	case reflect.Map:
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
+
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
+	}
+	if len(oots) > 0 {
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.sizecache = invalidField
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			u.v1extensions = toField(&f)
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
+	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizer,
+		marshaler: marshaler,
+		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizer,
+			marshaler: marshaler,
+		}
+	}
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
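+
+// For example, field number 1 encoded as a varint has wiretag 1<<3|WireVarint = 0x08,
+// which fits in a single byte, so its tagsize is 1.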
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// Oneof bytes field may also have "proto3" tag.
+			// We want to marshal it as a oneof field. Do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if slice {
+				return makeMessageSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeMessageMarshaler(getMarshalInfo(t))
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
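+
+// Packed repeated fields are encoded as one length-delimited record: a single tag,
+// a varint byte count, then the concatenated fixed-width elements, which is where
+// 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize comes from.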
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
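+// The zigzag sizers that follow use the sint32/sint64 zigzag mapping,
+// (n<<1)^(n>>31) for 32-bit and (n<<1)^(n>>63) for 64-bit values, which
+// interleaves negative and non-negative numbers (0->0, -1->1, 1->2, -2->3)
+// so that small magnitudes of either sign encode to short varints.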
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
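+
+// A note on the sizers above: the *NoZero variants return 0 for a zero value
+// (such fields are omitted from the output entirely), the *Ptr variants
+// return 0 for a nil pointer (an unset field contributes nothing), and the
+// *PackedSlice variants count the packed payload plus its varint length
+// prefix plus a single tag.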
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
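+
+// Each byte emitted above carries seven payload bits, least-significant
+// group first, with the high bit set on every byte except the last. For
+// example, 300 (0b1_0010_1100) encodes as the two bytes 0xAC 0x02.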
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
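+// The appendUTF8String variants below still append a string that fails
+// utf8.ValidString; they only report errInvalidUTF8 after encoding, leaving
+// it to the caller to decide whether that error is fatal.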
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
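+
+// Groups are delimited by a start-group/end-group tag pair rather than a
+// length prefix, which is why the sizer above adds 2*tagsize and the
+// marshaler brackets the payload with the two tags.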
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is a pointer to the
+	// value.
+	// The key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, we don't have a size cache,
+	// but it cannot be nested either. Just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
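+
+// Each map entry above is encoded like a small message: the entry is
+// length-prefixed, field number 1 carries the key and field number 2 carries
+// the value (hence keyWireTag and valWireTag).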
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// Oneof field is an interface. We need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
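+//
+// Each extension is therefore emitted as a group: a start-group tag for
+// field 1, the type_id as a varint on field 2, the length-delimited message
+// bytes on field 3, and a matching end-group tag.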
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, err := m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
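+
+// The per-field merge functions installed by computeMergeInfo append
+// repeated scalar fields and overwrite singular scalars only when the
+// source value is non-zero (or the source pointer is non-nil).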
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("message field %s without pointer", tf))
+			case isSlice: // E.g., []*pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mi.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mi.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be a oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
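+// For illustration, a sketch of the typical generated caller (not part of this
+// package): a generated message Foo declares a package-level
+//
+//	var xxx_messageInfo_Foo proto.InternalMessageInfo
+//
+// and forwards its XXX_Unmarshal method here:
+//
+//	func (m *Foo) XXX_Unmarshal(b []byte) error {
+//		return xxx_messageInfo_Foo.Unmarshal(m, b)
+//	}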
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
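+		// For illustration: a key byte of 0x1a decodes as x = 26, giving
+		// tag = 26>>3 = 3 and wire = 26&7 = 2 (WireBytes), i.e. field number 3
+		// carrying length-delimited data.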
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around,
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
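+	// For illustration: with three required fields u.reqMask is 0b111; if only
+	// the first and third were seen (reqMask == 0b101), the loop above finds
+	// bit 1 cleared and records a RequiredNotSetError for u.reqFields[1].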
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+				panic("bad type for XXX_extensions field: " + f.Type.Name())
+			}
+			u.oldExtensions = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
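+		// For illustration: a generated struct tag typically looks like
+		// `protobuf:"bytes,2,opt,name=value,proto3"`, so tagArray[0] is the wire
+		// encoding, tagArray[1] the field number parsed above, tagArray[2] the
+		// opt/req/rep label, and the remaining entries hold options such as name=.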
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
+			}
+		}
+
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
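+// For illustration: with the heuristic above, a message whose fields use tags
+// 1, 2, 3 and 1000 keeps tags 1-3 in the dense slice (indexed directly by tag
+// number) and, for a normally sized struct, falls back to the sparse map for
+// tag 1000.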
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
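+// For illustration: a packed repeated int64 field arrives as one
+// length-delimited record, e.g. a length of 0x03 followed by the varints
+// 0x01 0x02 0x03, so the WireBytes branch above slices off those three bytes
+// and decodes each element, while the WireVarint branch appends a single
+// element per key.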
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
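+// For illustration: append(emptyBuf[:], src...) with an empty src returns a
+// zero-length but non-nil slice backed by emptyBuf, which keeps a
+// present-but-empty bytes field distinguishable from an unset (nil) one.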
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
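+// Illustrative sketch (not part of the upstream source): the entry payload
+// handed to the closure above is an ordinary embedded message with the key as
+// field 1 and the value as field 2. For a map<int32, string> entry {1: "a"}
+// the payload is
+//
+//	0x08 0x01       // field 1, varint, key = 1
+//	0x12 0x01 0x61  // field 2, length-delimited, value = "a"
+//
+// which is why the loop above dispatches on x >> 3 being 1 or 2.
+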
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
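+// Illustrative sketch (not part of the upstream source): findEndGroup is handed
+// the bytes that follow a StartGroup tag. For a group containing a single
+// varint field followed by its EndGroup tag for field 1,
+//
+//	b = []byte{0x08, 0x2A, 0x0C} // field 1 varint 42, then EndGroup tag 0x0C
+//
+// it returns (2, 3): the EndGroup tag begins at index 2 and the data after the
+// group starts at index 3.
+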
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
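+
+// Illustrative sketch (not part of the upstream source): encodeVarint and
+// decodeVarint round-trip base-128 varints, least-significant seven bits first,
+// with the high bit of each byte marking continuation. For example:
+//
+//	b := encodeVarint(nil, 300) // b == []byte{0xAC, 0x02}
+//	v, n := decodeVarint(b)     // v == 300, n == 2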
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
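+// Illustrative sketch (the type URLs are hypothetical): a typical Any type URL
+// needs no quoting, while one containing a space or other byte outside the
+// allowed set does:
+//
+//	requiresQuotes("type.googleapis.com/example.Foo") // false
+//	requiresQuotes("type.googleapis.com/My Message")  // true
+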
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value in sv can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
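+// Illustrative sketch (the message type and field name are hypothetical): with
+// ExpandAny set and the packed message type registered, an Any wrapping a
+// message with one string field renders roughly as
+//
+//	[type.googleapis.com/example.Foo]: <
+//	  name: "x"
+//	>
+//
+// whereas an unregistered type falls through to writeStruct, which prints the
+// Any's own type_url and value fields instead.
+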
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
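+// Illustrative sketch (not part of the upstream source): bytes outside the
+// printable ASCII range come out as three-digit octal escapes, so
+//
+//	writeString(w, "a\nb\xff")
+//
+// emits "a\nb\377", including the surrounding double quotes.
+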
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
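+
+// Illustrative usage sketch (pb.Foo is a hypothetical generated message type
+// with an optional string field "name"; it is not defined in this repository):
+//
+//	msg := &pb.Foo{Name: proto.String("x")}
+//	proto.MarshalTextString(msg)  // "name: \"x\"\n"
+//	proto.CompactTextString(msg)  // "name:\"x\" "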
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(i), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
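+// Illustrative sketch (not part of the upstream source): unquoteC undoes the
+// escapes produced by the text writer, for example
+//
+//	s, err := unquoteC(`a\tb\101`, '"') // s == "a\tbA", err == nil
+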
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes an extension name or an expanded Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
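+
+// Illustrative usage sketch (pb.Foo is a hypothetical generated message type
+// with an optional string field "name"):
+//
+//	var msg pb.Foo
+//	err := proto.UnmarshalText(`name: "x"`, &msg) // sets msg's name field to "x"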
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..1ded05b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2887 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 0}
+}
+
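+// Illustrative sketch (not part of the generated output): the helpers above
+// give the usual round trip between enum values and their proto names, e.g.
+//
+//	FieldDescriptorProto_TYPE_STRING.String()      // "TYPE_STRING"
+//	FieldDescriptorProto_Type_value["TYPE_STRING"] // 9
+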
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// etc.
+	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10, 0}
+}
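+
+// Illustrative sketch, not generated output: the UnmarshalJSON method above
+// (via proto.UnmarshalJSONEnum) accepts either the enum name or its number.
+// The function name below is hypothetical.
+func exampleOptimizeModeJSON() bool {
+	var byName, byNumber FileOptions_OptimizeMode
+	errName := byName.UnmarshalJSON([]byte(`"CODE_SIZE"`))
+	errNum := byNumber.UnmarshalJSON([]byte(`2`))
+	return errName == nil && errNum == nil && byName == byNumber
+}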
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? An HTTP-based RPC implementation may choose the GET verb for
+// safe methods, and the PUT verb for idempotent methods, instead of the
+// default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
+	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+	0: "IDEMPOTENCY_UNKNOWN",
+	1: "NO_SIDE_EFFECTS",
+	2: "IDEMPOTENT",
+}
+
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+	"IDEMPOTENCY_UNKNOWN": 0,
+	"NO_SIDE_EFFECTS":     1,
+	"IDEMPOTENT":          2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+	p := new(MethodOptions_IdempotencyLevel)
+	*p = x
+	return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+	if err != nil {
+		return err
+	}
+	*x = MethodOptions_IdempotencyLevel(value)
+	return nil
+}
+
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()    {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{0}
+}
+
+func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
+}
+func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
+}
+func (m *FileDescriptorSet) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorSet.Size(m)
+}
+func (m *FileDescriptorSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
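+
+// Illustrative sketch, not generated output: the generated Get* accessors are
+// nil-safe, so chained access cannot panic even on a nil receiver. The
+// function name below is hypothetical.
+func exampleNilSafeAccess(fds *FileDescriptorSet) int {
+	return len(fds.GetFile()) // 0 when fds is nil or File is unset
+}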
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()    {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{1}
+}
+
+func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
+}
+func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
+}
+func (m *FileDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorProto.Size(m)
+}
+func (m *FileDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
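+
+// Illustrative sketch, not generated output: scalar fields in this proto2-style
+// struct are pointers, so literals are typically built with the proto helper
+// constructors. The variable name and the file/package names are hypothetical.
+var exampleFileDescriptorProto = &FileDescriptorProto{
+	Name:       proto.String("example.proto"),
+	Package:    proto.String("example"),
+	Syntax:     proto.String("proto3"),
+	Dependency: []string{"google/protobuf/descriptor.proto"},
+}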
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()    {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2}
+}
+
+func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
+}
+func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
+}
+func (m *DescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto.Size(m)
+}
+func (m *DescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage()    {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
+}
+func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
+}
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
+}
+func (m *ExtensionRangeOptions) XXX_Size() int {
+	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
+}
+func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()    {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4}
+}
+
+func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
+}
+func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
+}
+func (m *FieldDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FieldDescriptorProto.Size(m)
+}
+func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
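+
+// Illustrative sketch, not generated output: when Label or Type is unset, the
+// getters above fall back to the first declared enum value (LABEL_OPTIONAL and
+// TYPE_DOUBLE). The function name below is hypothetical.
+func exampleFieldDefaults() bool {
+	f := &FieldDescriptorProto{Name: proto.String("id")}
+	return f.GetLabel() == FieldDescriptorProto_LABEL_OPTIONAL &&
+		f.GetType() == FieldDescriptorProto_TYPE_DOUBLE
+}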
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()    {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{5}
+}
+
+func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
+}
+func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
+}
+func (m *OneofDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_OneofDescriptorProto.Size(m)
+}
+func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Range of reserved numeric values. Reserved numeric values may not be used
+	// by enum values in the same enum declaration. Reserved ranges may not
+	// overlap.
+	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved enum value names, which may not be reused. A given name may only
+	// be reserved once.
+	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()    {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6}
+}
+
+func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
+}
+func (m *EnumDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto.Size(m)
+}
+func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6, 0}
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()    {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{7}
+}
+
+func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
+}
+func (m *EnumValueDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
+}
+func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()    {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{8}
+}
+
+func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
+}
+func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
+}
+func (m *ServiceDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
+}
+func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies whether the client streams multiple client messages
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies whether the server streams multiple server messages
+	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()    {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{9}
+}
+
+func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
+}
+func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
+}
+func (m *MethodDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_MethodDescriptorProto.Size(m)
+}
+func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
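+
+// Illustrative sketch, not generated output: ClientStreaming and ServerStreaming
+// default to false via the Default_* constants above, so a plain unary method
+// needs neither field set. The method and type names in the literal are
+// hypothetical.
+func exampleUnaryMethod() bool {
+	m := &MethodDescriptorProto{
+		Name:       proto.String("GetThing"),
+		InputType:  proto.String(".example.GetThingRequest"),
+		OutputType: proto.String(".example.GetThingResponse"),
+	}
+	return !m.GetClientStreaming() && !m.GetServerStreaming()
+}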
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; at the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default, Swift generators will take the proto package and CamelCase it,
+	// replacing '.' with underscore, and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be used
+	// for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// The parser stores options it doesn't recognize here.
+	// See the documentation for the "Options" section above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FileOptions) Reset()         { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()    {}
+func (*FileOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10}
+}
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+func (m *FileOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
+}
+func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
+}
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
+}
+func (m *FileOptions) XXX_Size() int {
+	return xxx_messageInfo_FileOptions.Size(m)
+}
+func (m *FileOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileOptions proto.InternalMessageInfo
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+	if m != nil && m.PhpClassPrefix != nil {
+		return *m.PhpClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
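+
+// Illustrative sketch, not generated output: OptimizeFor is the only FileOptions
+// field above with a declared non-false default, so an empty options message
+// reports SPEED. The function name below is hypothetical.
+func exampleFileOptionsDefaults() bool {
+	o := &FileOptions{}
+	return o.GetOptimizeFor() == FileOptions_SPEED && !o.GetJavaMultipleFiles()
+}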
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()    {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+	return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
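+
+// Illustrative sketch, not generated output: map fields are represented by a
+// synthetic nested entry message whose MessageOptions carry map_entry=true;
+// the option is set by the compiler, not by hand. The function name below is
+// hypothetical.
+func exampleIsMapEntry(d *DescriptorProto) bool {
+	return d.GetOptions().GetMapEntry()
+}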
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+	// is represented as JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript value.
+	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+	// use the JavaScript "number" type.  The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()    {}
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12}
+}
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
+}
+func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
+}
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
+}
+func (m *FieldOptions) XXX_Size() int {
+	return xxx_messageInfo_FieldOptions.Size(m)
+}
+func (m *FieldOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()    {}
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{13}
+}
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
+}
+func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
+}
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
+}
+func (m *OneofOptions) XXX_Size() int {
+	return xxx_messageInfo_OneofOptions.Size(m)
+}
+func (m *OneofOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()    {}
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{14}
+}
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
+}
+func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
+}
+func (m *EnumOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumOptions.Size(m)
+}
+func (m *EnumOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()    {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{15}
+}
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
+}
+func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
+}
+func (m *EnumValueOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumValueOptions.Size(m)
+}
+func (m *EnumValueOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()    {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{16}
+}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
+}
+func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
+}
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
+}
+func (m *ServiceOptions) XXX_Size() int {
+	return xxx_messageInfo_ServiceOptions.Size(m)
+}
+func (m *ServiceOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()    {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()    {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18}
+}
+
+func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
+}
+func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
+}
+func (m *UninterpretedOption) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption.Size(m)
+}
+func (m *UninterpretedOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
+}
+func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
+}
+func (m *UninterpretedOption_NamePart) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
+}
+func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements are
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()    {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19}
+}
+
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()    {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19, 0}
+}
+
+func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
+}
+func (m *SourceCodeInfo_Location) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
+}
+func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()    {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20}
+}
+
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified object. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+}
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+
+var fileDescriptor_e5baabe45344a177 = []byte{
+	// 2589 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+	0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
+	0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
+	0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
+	0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
+	0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
+	0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
+	0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
+	0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
+	0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
+	0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
+	0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
+	0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
+	0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
+	0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
+	0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
+	0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
+	0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
+	0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
+	0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
+	0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
+	0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
+	0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
+	0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
+	0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
+	0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
+	0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
+	0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
+	0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
+	0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
+	0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
+	0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
+	0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
+	0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
+	0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
+	0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
+	0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
+	0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
+	0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
+	0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
+	0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
+	0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
+	0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
+	0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
+	0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
+	0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
+	0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
+	0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
+	0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
+	0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
+	0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
+	0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
+	0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
+	0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
+	0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
+	0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
+	0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
+	0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
+	0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
+	0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
+	0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
+	0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
+	0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
+	0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
+	0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
+	0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
+	0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
+	0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
+	0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
+	0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
+	0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
+	0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
+	0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
+	0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
+	0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
+	0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
+	0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
+	0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
+	0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
+	0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
+	0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
+	0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
+	0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
+	0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
+	0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
+	0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
+	0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
+	0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
+	0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
+	0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
+	0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
+	0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
+	0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
+	0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
+	0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
+	0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
+	0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
+	0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
+	0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
+	0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
+	0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
+	0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
+	0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
+	0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
+	0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
+	0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
+	0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
+	0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
+	0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
+	0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
+	0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
+	0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
+	0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
+	0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
+	0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
+	0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
+	0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
+	0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
+	0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
+	0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
+	0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
+	0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
+	0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
+	0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
+	0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
+	0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
+	0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
+	0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
+	0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
+	0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
+	0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
+	0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
+	0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
+	0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
+	0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
+	0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
+	0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
+	0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
+	0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
+	0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
+	0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
+	0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
+	0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
+	0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
+	0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
+	0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
+	0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
+	0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
+	0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
+	0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
+	0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
+	0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
+	0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
+	0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
+	0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
+	0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
+	0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
+	0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
+	0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
+	0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
+	0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..ed08fcb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,883 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+
+    optional ExtensionRangeOptions options = 3;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
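+  // As an illustrative sketch (the numbers and names are arbitrary examples,
+  // not part of this file), reserved numbers and names are declared in a
+  // .proto file as:
+  //   reserved 2, 15, 9 to 11;
+  //   reserved "foo", "bar";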
+  repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by the protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+
+  // Range of reserved numeric values. Reserved values may not be used by
+  // entries in the same enum. Reserved ranges may not overlap.
+  //
+  // Note that this is distinct from DescriptorProto.ReservedRange in that it
+  // is inclusive such that it can appropriately represent the entire int32
+  // domain.
+  message EnumReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Inclusive.
+  }
+
+  // Range of reserved numeric values. Reserved numeric values may not be used
+  // by enum values in the same enum declaration. Reserved ranges may not
+  // overlap.
+  repeated EnumReservedRange reserved_range = 4;
+
+  // Reserved enum value names, which may not be reused. A given name may only
+  // be reserved once.
+  repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies whether the client streams multiple client messages
+  optional bool client_streaming = 5 [default=false];
+  // Identifies whether the server streams multiple server messages
+  optional bool server_streaming = 6 [default=false];
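+
+  // As an illustrative sketch (the method and message names are arbitrary
+  // examples), the "stream" keyword on the request type sets client_streaming
+  // and on the response type sets server_streaming:
+  //   rpc Chat(stream ChatMessage) returns (stream ChatMessage);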
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
+
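+// As an illustrative sketch (the option name "my_file_option" and the field
+// number 50000 are arbitrary examples, not defined in this file), a custom
+// file-level option in the 50000-99999 range could be declared and set like
+// this:
+//
+//   import "google/protobuf/descriptor.proto";
+//
+//   extend google.protobuf.FileOptions {
+//     optional string my_file_option = 50000;
+//   }
+//
+//   option (my_file_option) = "Hello world!";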
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+
+  // Use this option to change the namespace of php generated metadata classes.
+  // Default is empty. When this option is empty, the proto file name will be used
+  // for determining the namespace.
+  optional string php_metadata_namespace = 44;
+
+  // Use this option to change the package of ruby generated classes. Default
+  // is empty. When this option is not set, the package name will be used for
+  // determining the ruby package.
+  optional string ruby_package = 45;
+
+  // The parser stores options it doesn't recognize here.
+  // See the documentation for the "Options" section above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message.
+  // See the documentation for the "Options" section above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
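+  // As an illustrative sketch (the field name and number are arbitrary
+  // examples), a packed repeated field is written as:
+  //   repeated int32 samples = 4 [packed=true];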
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+  // is represented as a JavaScript string, which avoids loss of precision that
+  // can happen when a large value is converted to a floating point JavaScript
+  // number.
+  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+  // use the JavaScript "number" type.  The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
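+  //
+  // As an illustrative sketch (the field name and number are arbitrary
+  // examples):
+  //   optional uint64 id = 1 [jstype = JS_STRING];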
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
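+  //
+  // As an illustrative sketch (the enum and value names are arbitrary
+  // examples):
+  //   enum Foo {
+  //     option allow_alias = true;
+  //     BAR = 1;
+  //     BAZ = 1;  // alias for BAR
+  //   }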
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? An HTTP-based RPC implementation may choose the GET verb for
+  // safe methods, and the PUT verb for idempotent methods, instead of the
+  // default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
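+
+  // As an illustrative sketch (the method and message names are arbitrary
+  // examples), the level is set per method:
+  //   rpc GetFoo(GetFooRequest) returns (Foo) {
+  //     option idempotency_level = NO_SIDE_EFFECTS;
+  //   }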
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified offset. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
new file mode 100644
index 0000000..0d6055d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
@@ -0,0 +1,51 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+	A plugin for the Google protocol buffer compiler to generate Go code.
+	Run it by building this program and putting it in your path with the name
+		protoc-gen-go
+	That word 'go' at the end becomes part of the option string set for the
+	protocol compiler, so once the protocol compiler (protoc) is installed
+	you can run
+		protoc --go_out=output_directory input_directory/file.proto
+	to generate Go bindings for the protocol defined by file.proto.
+	With that input, the output will be written to
+		output_directory/file.pb.go
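+
+	The portion of --go_out before a colon can also carry generator options;
+	as an illustrative sketch (assuming the grpc plugin is linked into the
+	generator, and using "." as the output directory):
+		protoc --go_out=plugins=grpc,paths=source_relative:. input_directory/file.proto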
+
+	The generated code is documented in the package comment for
+	the library.
+
+	See the README and documentation for protocol buffers to learn more:
+		https://developers.google.com/protocol-buffers/
+
+*/
+package documentation
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
new file mode 100644
index 0000000..6f4a902
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -0,0 +1,2806 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+	The code generator for the plugin for the Google protocol buffer compiler.
+	It generates Go code from the protocol buffer description files read by the
+	main routine.
+*/
+package generator
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"log"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
+
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 3
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+	// Name identifies the plugin.
+	Name() string
+	// Init is called once after data structures are built but before
+	// code generation begins.
+	Init(g *Generator)
+	// Generate produces the code generated by the plugin for this file,
+	// except for the imports, by calling the generator's methods P, In, and Out.
+	Generate(file *FileDescriptor)
+	// GenerateImports produces the import declarations for this file.
+	// It is called after Generate.
+	GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+	plugins = append(plugins, p)
+}
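+
+// A minimal sketch of a plugin (the type name "emptyPlugin" is illustrative,
+// not part of this package); a real plugin would emit code from Generate:
+//
+//	type emptyPlugin struct{ gen *Generator }
+//
+//	func (p *emptyPlugin) Name() string                          { return "empty" }
+//	func (p *emptyPlugin) Init(g *Generator)                     { p.gen = g }
+//	func (p *emptyPlugin) Generate(file *FileDescriptor)         {}
+//	func (p *emptyPlugin) GenerateImports(file *FileDescriptor)  {}
+//
+// and register itself during initialization:
+//
+//	func init() { RegisterPlugin(new(emptyPlugin)) }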
+
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it.  These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name method are common to messages and enums.
+type common struct {
+	file *FileDescriptor // File this object comes from.
+}
+
+// GoImportPath is the import path of the Go package containing the type.
+func (c *common) GoImportPath() GoImportPath {
+	return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
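+// As an illustrative sketch (the names are arbitrary examples), an extension
+// field "bar_baz" declared inside message "Foo" yields "E_Foo_BarBaz".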
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	importPath  GoImportPath  // Import path of this file's package.
+	packageName GoPackageName // Name of this file's Go package.
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+	h := sha256.Sum256([]byte(d.GetName()))
+	return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
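+// As an illustrative sketch (the paths are arbitrary examples):
+//   go_package = "example.com/foo/bar;baz" yields ("example.com/foo/bar", "baz", true)
+//   go_package = "example.com/foo/bar"     yields ("example.com/foo/bar", "bar", true)
+//   go_package = "bar"                     yields ("", "bar", true)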
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+	opt := d.GetOptions().GetGoPackage()
+	if opt == "" {
+		return "", "", false
+	}
+	// A semicolon-delimited suffix delimits the import path and package name.
+	sc := strings.Index(opt, ";")
+	if sc >= 0 {
+		return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+	}
+	// The presence of a slash implies there's an import path.
+	slash := strings.LastIndex(opt, "/")
+	if slash >= 0 {
+		return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+	}
+	return "", cleanPackageName(opt), true
+}
+
+// goFileName returns the output name for the generated Go file.
+func (d *FileDescriptor) goFileName(pathType pathType) string {
+	name := *d.Name
+	if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
+		name = name[:len(name)-len(ext)]
+	}
+	name += ".pb.go"
+
+	if pathType == pathTypeSourceRelative {
+		return name
+	}
+
+	// Does the file have a "go_package" option?
+	// If it does, it may override the filename.
+	if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
+		// Replace the existing dirname with the declared import path.
+		_, name = path.Split(name)
+		name = path.Join(string(impPath), name)
+		return name
+	}
+
+	return name
+}
+
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
+	d.exported[obj] = append(d.exported[obj], sym)
+}
+
+// symbol is an interface representing an exported Go symbol.
+type symbol interface {
+	// GenerateAlias should generate an appropriate alias
+	// for the symbol from the named package.
+	GenerateAlias(g *Generator, filename string, pkg GoPackageName)
+}
+
+type messageSymbol struct {
+	sym                         string
+	hasExtensions, isMessageSet bool
+	oneofTypes                  []string
+}
+
+type getterSymbol struct {
+	name     string
+	typ      string
+	typeName string // canonical name in proto world; empty for proto.Message and similar
+	genType  bool   // whether typ contains a generated type (message/group/enum)
+}
+
+func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	g.P("// ", ms.sym, " from public import ", filename)
+	g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
+	for _, name := range ms.oneofTypes {
+		g.P("type ", name, " = ", pkg, ".", name)
+	}
+}
+
+type enumSymbol struct {
+	name   string
+	proto3 bool // Whether this came from a proto3 file.
+}
+
+func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	s := es.name
+	g.P("// ", s, " from public import ", filename)
+	g.P("type ", s, " = ", pkg, ".", s)
+	g.P("var ", s, "_name = ", pkg, ".", s, "_name")
+	g.P("var ", s, "_value = ", pkg, ".", s, "_value")
+}
+
+type constOrVarSymbol struct {
+	sym  string
+	typ  string // either "const" or "var"
+	cast string // if non-empty, a type cast is required (used for enums)
+}
+
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	v := string(pkg) + "." + cs.sym
+	if cs.cast != "" {
+		v = cs.cast + "(" + v + ")"
+	}
+	g.P(cs.typ, " ", cs.sym, " = ", v)
+}
+
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
+type Object interface {
+	GoImportPath() GoImportPath
+	TypeName() []string
+	File() *FileDescriptor
+}
+
+// Generator is the type whose methods generate the output, stored in the associated response structure.
+type Generator struct {
+	*bytes.Buffer
+
+	Request  *plugin.CodeGeneratorRequest  // The input.
+	Response *plugin.CodeGeneratorResponse // The output.
+
+	Param             map[string]string // Command-line parameters.
+	PackageImportPath string            // Go import path of the package we're generating code for
+	ImportPrefix      string            // String to prefix to imported package file names.
+	ImportMap         map[string]string // Mapping from .proto file name to import path
+
+	Pkg map[string]string // The names under which we import support packages
+
+	outputImportPath GoImportPath                   // Package we're generating code for.
+	allFiles         []*FileDescriptor              // All files in the tree
+	allFilesByName   map[string]*FileDescriptor     // All files by filename.
+	genFiles         []*FileDescriptor              // Those files we will generate output for.
+	file             *FileDescriptor                // The file we are compiling now.
+	packageNames     map[GoImportPath]GoPackageName // Imported package names in the current file.
+	usedPackages     map[GoImportPath]bool          // Packages used in current file.
+	usedPackageNames map[GoPackageName]bool         // Package names used in the current file.
+	addedImports     map[GoImportPath]bool          // Additional imports to emit.
+	typeNameToObject map[string]Object              // Key is a fully-qualified name in input syntax.
+	init             []string                       // Lines to emit in the init function.
+	indent           string
+	pathType         pathType // How to generate output filenames.
+	writeOutput      bool
+	annotateCode     bool                                       // whether to store annotations
+	annotations      []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
+}
+
+type pathType int
+
+const (
+	pathTypeImport pathType = iota
+	pathTypeSourceRelative
+)
+
+// New creates a new generator and allocates the request and response protobufs.
+func New() *Generator {
+	g := new(Generator)
+	g.Buffer = new(bytes.Buffer)
+	g.Request = new(plugin.CodeGeneratorRequest)
+	g.Response = new(plugin.CodeGeneratorResponse)
+	return g
+}
+
+// Error reports a problem, including an error, and exits the program.
+func (g *Generator) Error(err error, msgs ...string) {
+	s := strings.Join(msgs, " ") + ":" + err.Error()
+	log.Print("protoc-gen-go: error:", s)
+	os.Exit(1)
+}
+
+// Fail reports a problem and exits the program.
+func (g *Generator) Fail(msgs ...string) {
+	s := strings.Join(msgs, " ")
+	log.Print("protoc-gen-go: error:", s)
+	os.Exit(1)
+}
+
+// CommandLineParameters breaks the comma-separated list of key=value pairs
+// in the parameter (a member of the request protobuf) into a key/value map.
+// It then sets file name mappings defined by those entries.
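+//
+// For illustration (the values are hypothetical), the parameter string
+//	plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo
+// restricts the enabled plugins to the one registered as "grpc" (if any),
+// selects source-relative output file names, and maps foo.proto to the
+// import path example.com/foo.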
+func (g *Generator) CommandLineParameters(parameter string) {
+	g.Param = make(map[string]string)
+	for _, p := range strings.Split(parameter, ",") {
+		if i := strings.Index(p, "="); i < 0 {
+			g.Param[p] = ""
+		} else {
+			g.Param[p[0:i]] = p[i+1:]
+		}
+	}
+
+	g.ImportMap = make(map[string]string)
+	pluginList := "none" // Default list of plugin names to enable; "none" effectively disables all plugins (an empty string would leave them all enabled).
+	for k, v := range g.Param {
+		switch k {
+		case "import_prefix":
+			g.ImportPrefix = v
+		case "import_path":
+			g.PackageImportPath = v
+		case "paths":
+			switch v {
+			case "import":
+				g.pathType = pathTypeImport
+			case "source_relative":
+				g.pathType = pathTypeSourceRelative
+			default:
+				g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
+			}
+		case "plugins":
+			pluginList = v
+		case "annotate_code":
+			if v == "true" {
+				g.annotateCode = true
+			}
+		default:
+			if len(k) > 0 && k[0] == 'M' {
+				g.ImportMap[k[1:]] = v
+			}
+		}
+	}
+	if pluginList != "" {
+		// Amend the set of plugins.
+		enabled := make(map[string]bool)
+		for _, name := range strings.Split(pluginList, "+") {
+			enabled[name] = true
+		}
+		var nplugins []Plugin
+		for _, p := range plugins {
+			if enabled[p.Name()] {
+				nplugins = append(nplugins, p)
+			}
+		}
+		plugins = nplugins
+	}
+}
+
+// DefaultPackageName returns the package name printed for the object.
+// If the object's file is in a different package from the one being generated,
+// it returns the name we use for that package, followed by ".".
+// Otherwise it returns the empty string.
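+//
+// For example (hypothetical name): for an object whose package we import as
+// "foopb" it returns "foopb.", so a qualified type prints as "foopb.Message";
+// for an object in the package being generated it returns "".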
+func (g *Generator) DefaultPackageName(obj Object) string {
+	importPath := obj.GoImportPath()
+	if importPath == g.outputImportPath {
+		return ""
+	}
+	return string(g.GoPackageName(importPath)) + "."
+}
+
+// GoPackageName returns the name used for a package.
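+//
+// If the cleaned base name is already in use or is a Go predeclared
+// identifier, a numeric suffix is appended: two distinct import paths that
+// both end in "examplepb" (hypothetical) would be named "examplepb" and
+// "examplepb1".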
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
+	if name, ok := g.packageNames[importPath]; ok {
+		return name
+	}
+	name := cleanPackageName(baseName(string(importPath)))
+	for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	g.packageNames[importPath] = name
+	g.usedPackageNames[name] = true
+	return name
+}
+
+// AddImport adds a package to the generated file's import section.
+// It returns the name used for the package.
+func (g *Generator) AddImport(importPath GoImportPath) GoPackageName {
+	g.addedImports[importPath] = true
+	return g.GoPackageName(importPath)
+}
+
+var globalPackageNames = map[GoPackageName]bool{
+	"fmt":   true,
+	"math":  true,
+	"proto": true,
+}
+
+// RegisterUniquePackageName creates and remembers a guaranteed unique package
+// name derived from pkg, the candidate name.
+// The FileDescriptor parameter is unused.
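+//
+// For example, RegisterUniquePackageName("proto", nil) returns "proto1",
+// since "proto" is already reserved in globalPackageNames.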
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
+	name := cleanPackageName(pkg)
+	for i, orig := 1, name; globalPackageNames[name]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	globalPackageNames[name] = true
+	return string(name)
+}
+
+var isGoKeyword = map[string]bool{
+	"break":       true,
+	"case":        true,
+	"chan":        true,
+	"const":       true,
+	"continue":    true,
+	"default":     true,
+	"else":        true,
+	"defer":       true,
+	"fallthrough": true,
+	"for":         true,
+	"func":        true,
+	"go":          true,
+	"goto":        true,
+	"if":          true,
+	"import":      true,
+	"interface":   true,
+	"map":         true,
+	"package":     true,
+	"range":       true,
+	"return":      true,
+	"select":      true,
+	"struct":      true,
+	"switch":      true,
+	"type":        true,
+	"var":         true,
+}
+
+var isGoPredeclaredIdentifier = map[string]bool{
+	"append":     true,
+	"bool":       true,
+	"byte":       true,
+	"cap":        true,
+	"close":      true,
+	"complex":    true,
+	"complex128": true,
+	"complex64":  true,
+	"copy":       true,
+	"delete":     true,
+	"error":      true,
+	"false":      true,
+	"float32":    true,
+	"float64":    true,
+	"imag":       true,
+	"int":        true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"int8":       true,
+	"iota":       true,
+	"len":        true,
+	"make":       true,
+	"new":        true,
+	"nil":        true,
+	"panic":      true,
+	"print":      true,
+	"println":    true,
+	"real":       true,
+	"recover":    true,
+	"rune":       true,
+	"string":     true,
+	"true":       true,
+	"uint":       true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uint8":      true,
+	"uintptr":    true,
+}
+
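+// cleanPackageName turns name into a valid Go package identifier. It maps the
+// name through badToUnderscore (assumed to replace characters that are not
+// valid in identifiers with underscores) and prefixes an underscore if the
+// result is a Go keyword or begins with a digit; for example (hypothetical
+// inputs), "my-pkg" would become "my_pkg" and "type" becomes "_type".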
+func cleanPackageName(name string) GoPackageName {
+	name = strings.Map(badToUnderscore, name)
+	// Identifier must not be keyword or predeclared identifier: insert _.
+	if isGoKeyword[name] {
+		name = "_" + name
+	}
+	// Identifier must not begin with digit: insert _.
+	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
+		name = "_" + name
+	}
+	return GoPackageName(name)
+}
+
+// defaultGoPackage returns the package name to use,
+// derived from the import path of the package we're building code for.
+func (g *Generator) defaultGoPackage() GoPackageName {
+	p := g.PackageImportPath
+	if i := strings.LastIndex(p, "/"); i >= 0 {
+		p = p[i+1:]
+	}
+	return cleanPackageName(p)
+}
+
+// SetPackageNames sets the package name for this run.
+// The package name must agree across all files being generated.
+// It also defines unique package names for all imported files.
+func (g *Generator) SetPackageNames() {
+	g.outputImportPath = g.genFiles[0].importPath
+
+	defaultPackageNames := make(map[GoImportPath]GoPackageName)
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			defaultPackageNames[f.importPath] = p
+		}
+	}
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			// Source file: option go_package = "quux/bar";
+			f.packageName = p
+		} else if p, ok := defaultPackageNames[f.importPath]; ok {
+			// A go_package option in another file in the same package.
+			//
+			// This is a poor choice in general, since every source file should
+			// contain a go_package option. Supported mainly for historical
+			// compatibility.
+			f.packageName = p
+		} else if p := g.defaultGoPackage(); p != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets a package name for files which don't
+			// contain a go_package option.
+			f.packageName = p
+		} else if p := f.GetPackage(); p != "" {
+			// Source file: package quux.bar;
+			f.packageName = cleanPackageName(p)
+		} else {
+			// Source filename.
+			f.packageName = cleanPackageName(baseName(f.GetName()))
+		}
+	}
+
+	// Check that all files have a consistent package name and import path.
+	for _, f := range g.genFiles[1:] {
+		if a, b := g.genFiles[0].importPath, f.importPath; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
+		}
+		if a, b := g.genFiles[0].packageName, f.packageName; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
+		}
+	}
+
+	// Names of support packages. These never vary (if there are conflicts,
+	// we rename the conflicting package), so this could be removed someday.
+	g.Pkg = map[string]string{
+		"fmt":   "fmt",
+		"math":  "math",
+		"proto": "proto",
+	}
+}
+
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
+// and FileDescriptorProtos into file-referenced objects within the Generator.
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
+func (g *Generator) WrapTypes() {
+	g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
+	g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
+	genFileNames := make(map[string]bool)
+	for _, n := range g.Request.FileToGenerate {
+		genFileNames[n] = true
+	}
+	for _, f := range g.Request.ProtoFile {
+		fd := &FileDescriptor{
+			FileDescriptorProto: f,
+			exported:            make(map[Object][]symbol),
+			proto3:              fileIsProto3(f),
+		}
+		// The import path may be set in a number of ways.
+		if substitution, ok := g.ImportMap[f.GetName()]; ok {
+			// Command-line: M=foo.proto=quux/bar.
+			//
+			// Explicit mapping of source file to import path.
+			fd.importPath = GoImportPath(substitution)
+		} else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets the import path for every file that
+			// we generate code for.
+			fd.importPath = GoImportPath(g.PackageImportPath)
+		} else if p, _, _ := fd.goPackageOption(); p != "" {
+			// Source file: option go_package = "quux/bar";
+			//
+			// The go_package option sets the import path. Most users should use this.
+			fd.importPath = p
+		} else {
+			// Source filename.
+			//
+			// Last resort when nothing else is available.
+			fd.importPath = GoImportPath(path.Dir(f.GetName()))
+		}
+		// We must wrap the descriptors before we wrap the enums
+		fd.desc = wrapDescriptors(fd)
+		g.buildNestedDescriptors(fd.desc)
+		fd.enum = wrapEnumDescriptors(fd, fd.desc)
+		g.buildNestedEnums(fd.desc, fd.enum)
+		fd.ext = wrapExtensions(fd)
+		extractComments(fd)
+		g.allFiles = append(g.allFiles, fd)
+		g.allFilesByName[f.GetName()] = fd
+	}
+	for _, fd := range g.allFiles {
+		fd.imp = wrapImported(fd, g)
+	}
+
+	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+	for _, fileName := range g.Request.FileToGenerate {
+		fd := g.allFilesByName[fileName]
+		if fd == nil {
+			g.Fail("could not find file named", fileName)
+		}
+		g.genFiles = append(g.genFiles, fd)
+	}
+}
+
+// Scan the descriptors in this file.  For each one, build the slice of nested descriptors
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+	for _, desc := range descs {
+		if len(desc.NestedType) != 0 {
+			for _, nest := range descs {
+				if nest.parent == desc {
+					desc.nested = append(desc.nested, nest)
+				}
+			}
+			if len(desc.nested) != len(desc.NestedType) {
+				g.Fail("internal error: nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+	for _, desc := range descs {
+		if len(desc.EnumType) != 0 {
+			for _, enum := range enums {
+				if enum.parent == desc {
+					desc.enums = append(desc.enums, enum)
+				}
+			}
+			if len(desc.enums) != len(desc.EnumType) {
+				g.Fail("internal error: enum nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
+	d := &Descriptor{
+		common:          common{file},
+		DescriptorProto: desc,
+		parent:          parent,
+		index:           index,
+	}
+	if parent == nil {
+		d.path = fmt.Sprintf("%d,%d", messagePath, index)
+	} else {
+		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+	}
+
+	// The only way to distinguish a group from a message is whether
+	// the containing message has a TYPE_GROUP field that matches.
+	if parent != nil {
+		parts := d.TypeName()
+		if file.Package != nil {
+			parts = append([]string{*file.Package}, parts...)
+		}
+		exp := "." + strings.Join(parts, ".")
+		for _, field := range parent.Field {
+			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+				d.group = true
+				break
+			}
+		}
+	}
+
+	for _, field := range desc.Extension {
+		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+	}
+
+	return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
+	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+	for i, desc := range file.MessageType {
+		sl = wrapThisDescriptor(sl, desc, nil, file, i)
+	}
+	return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
+	sl = append(sl, newDescriptor(desc, parent, file, index))
+	me := sl[len(sl)-1]
+	for i, nested := range desc.NestedType {
+		sl = wrapThisDescriptor(sl, nested, me, file, i)
+	}
+	return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
+	ed := &EnumDescriptor{
+		common:              common{file},
+		EnumDescriptorProto: desc,
+		parent:              parent,
+		index:               index,
+	}
+	if parent == nil {
+		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+	} else {
+		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+	}
+	return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
+	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+	// Top-level enums.
+	for i, enum := range file.EnumType {
+		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+	}
+	// Enums within messages. Enums within embedded messages appear in the outer-most message.
+	for _, nested := range descs {
+		for i, enum := range nested.EnumType {
+			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+		}
+	}
+	return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
+	var sl []*ExtensionDescriptor
+	for _, field := range file.Extension {
+		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+	}
+	return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
+	for _, index := range file.PublicDependency {
+		df := g.fileByName(file.Dependency[index])
+		for _, d := range df.desc {
+			if d.GetOptions().GetMapEntry() {
+				continue
+			}
+			sl = append(sl, &ImportedDescriptor{common{file}, d})
+		}
+		for _, e := range df.enum {
+			sl = append(sl, &ImportedDescriptor{common{file}, e})
+		}
+		for _, ext := range df.ext {
+			sl = append(sl, &ImportedDescriptor{common{file}, ext})
+		}
+	}
+	return
+}
+
+func extractComments(file *FileDescriptor) {
+	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+		if loc.LeadingComments == nil {
+			continue
+		}
+		var p []string
+		for _, n := range loc.Path {
+			p = append(p, strconv.Itoa(int(n)))
+		}
+		file.comments[strings.Join(p, ",")] = loc
+	}
+}
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+	g.typeNameToObject = make(map[string]Object)
+	for _, f := range g.allFiles {
+		// The names in this loop are defined by the proto world, not us, so the
+		// package name may be empty.  If so, the dotted package name of X will
+		// be ".X"; otherwise it will be ".pkg.X".
+		dottedPkg := "." + f.GetPackage()
+		if dottedPkg != "." {
+			dottedPkg += "."
+		}
+		for _, enum := range f.enum {
+			name := dottedPkg + dottedSlice(enum.TypeName())
+			g.typeNameToObject[name] = enum
+		}
+		for _, desc := range f.desc {
+			name := dottedPkg + dottedSlice(desc.TypeName())
+			g.typeNameToObject[name] = desc
+		}
+	}
+}
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+	o, ok := g.typeNameToObject[typeName]
+	if !ok {
+		g.Fail("can't find object with type", typeName)
+	}
+	return o
+}
+
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
+type AnnotatedAtoms struct {
+	source string
+	path   string
+	atoms  []interface{}
+}
+
+// Annotate records the file name and proto AST path of a list of atoms
+// so that a later call to P can emit a link from each atom to its origin.
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
+	return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
+}
+
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
+func (g *Generator) printAtom(v interface{}) {
+	switch v := v.(type) {
+	case string:
+		g.WriteString(v)
+	case *string:
+		g.WriteString(*v)
+	case bool:
+		fmt.Fprint(g, v)
+	case *bool:
+		fmt.Fprint(g, *v)
+	case int:
+		fmt.Fprint(g, v)
+	case *int32:
+		fmt.Fprint(g, *v)
+	case *int64:
+		fmt.Fprint(g, *v)
+	case float64:
+		fmt.Fprint(g, v)
+	case *float64:
+		fmt.Fprint(g, *v)
+	case GoPackageName:
+		g.WriteString(string(v))
+	case GoImportPath:
+		g.WriteString(strconv.Quote(string(v)))
+	default:
+		g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+	}
+}
+
+// P prints the arguments to the generated output.  It handles strings and int32s, plus
+// handling indirections because they may be *string, etc.  Any inputs of type AnnotatedAtoms may emit
+// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
+// is true).
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch v := v.(type) {
+		case *AnnotatedAtoms:
+			begin := int32(g.Len())
+			for _, v := range v.atoms {
+				g.printAtom(v)
+			}
+			if g.annotateCode {
+				end := int32(g.Len())
+				var path []int32
+				for _, token := range strings.Split(v.path, ",") {
+					val, err := strconv.ParseInt(token, 10, 32)
+					if err != nil {
+						g.Fail("could not parse proto AST path: ", err.Error())
+					}
+					path = append(path, int32(val))
+				}
+				g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
+					Path:       path,
+					SourceFile: &v.source,
+					Begin:      &begin,
+					End:        &end,
+				})
+			}
+		default:
+			g.printAtom(v)
+		}
+	}
+	g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.annotations = nil
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		fname := file.goFileName(g.pathType)
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(fname),
+			Content: proto.String(g.String()),
+		})
+		if g.annotateCode {
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
+			// strings contain valid UTF-8.
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+			})
+		}
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.file = file
+	g.usedPackages = make(map[GoImportPath]bool)
+	g.packageNames = make(map[GoImportPath]GoPackageName)
+	g.usedPackageNames = make(map[GoPackageName]bool)
+	g.addedImports = make(map[GoImportPath]bool)
+	for name := range globalPackageNames {
+		g.usedPackageNames[name] = true
+	}
+
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the proto package it is being compiled against.")
+	g.P("// A compilation error at this line likely means your copy of the")
+	g.P("// proto package needs to be updated.")
+	g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	g.P()
+
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+	g.generateFileDescriptor(file)
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	remAnno := g.annotations
+	g.Buffer = new(bytes.Buffer)
+	g.annotations = nil
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	// Adjust the offsets for annotations displaced by the header and imports.
+	for _, anno := range remAnno {
+		*anno.Begin += int32(g.Len())
+		*anno.End += int32(g.Len())
+		g.annotations = append(g.annotations, anno)
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code and patch annotation locations.
+	fset := token.NewFileSet()
+	original := g.Bytes()
+	if g.annotateCode {
+		// make a copy independent of g; we'll need it after Reset.
+		original = append([]byte(nil), original...)
+	}
+	fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+		var src bytes.Buffer
+		s := bufio.NewScanner(bytes.NewReader(original))
+		for line := 1; s.Scan(); line++ {
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+		}
+		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+	}
+	ast.SortImports(fset, fileAST)
+	g.Reset()
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
+	if err != nil {
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
+	}
+	if g.annotateCode {
+		m, err := remap.Compute(original, g.Bytes())
+		if err != nil {
+			g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
+		}
+		for _, anno := range g.annotations {
+			new, ok := m.Find(int(*anno.Begin), int(*anno.End))
+			if !ok {
+				g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
+			}
+			*anno.Begin = int32(new.Pos)
+			*anno.End = int32(new.End)
+		}
+	}
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+	g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+	if g.file.GetOptions().GetDeprecated() {
+		g.P("// ", g.file.Name, " is a deprecated file.")
+	} else {
+		g.P("// source: ", g.file.Name)
+	}
+	g.P()
+	g.PrintComments(strconv.Itoa(packagePath))
+	g.P()
+	g.P("package ", g.file.packageName)
+	g.P()
+}
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if c, ok := g.makeComments(path); ok {
+		g.P(c)
+		return true
+	}
+	return false
+}
+
+// makeComments generates the comment string for the field, no "\n" at the end
+func (g *Generator) makeComments(path string) (string, bool) {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return "", false
+	}
+	w := new(bytes.Buffer)
+	nl := ""
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+		fmt.Fprintf(w, "%s//%s", nl, line)
+		nl = "\n"
+	}
+	return w.String(), true
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+	return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+	for _, j := range g.file.WeakDependency {
+		if j == i {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+	imports := make(map[GoImportPath]GoPackageName)
+	for i, s := range g.file.Dependency {
+		fd := g.fileByName(s)
+		importPath := fd.importPath
+		// Do not import our own package.
+		if importPath == g.file.importPath {
+			continue
+		}
+		// Do not import weak imports.
+		if g.weak(int32(i)) {
+			continue
+		}
+		// Do not import a package twice.
+		if _, ok := imports[importPath]; ok {
+			continue
+		}
+		// We need to import all the dependencies, even if we don't reference them,
+		// because other code and tools depend on having the full transitive closure
+		// of protocol buffer types in the binary.
+		packageName := g.GoPackageName(importPath)
+		if _, ok := g.usedPackages[importPath]; !ok {
+			packageName = "_"
+		}
+		imports[importPath] = packageName
+	}
+	for importPath := range g.addedImports {
+		imports[importPath] = g.GoPackageName(importPath)
+	}
+	// We almost always need a proto import.  Rather than computing when we
+	// do, which is tricky when there's a plugin, just import it and
+	// reference it later. The same argument applies to the fmt and math packages.
+	g.P("import (")
+	g.P(g.Pkg["fmt"] + ` "fmt"`)
+	g.P(g.Pkg["math"] + ` "math"`)
+	g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
+	for importPath, packageName := range imports {
+		g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
+	}
+	g.P(")")
+	g.P()
+	// TODO: may need to worry about uniqueness across plugins
+	for _, p := range plugins {
+		p.GenerateImports(g.file)
+		g.P()
+	}
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+	g.P("var _ = ", g.Pkg["math"], ".Inf")
+	g.P()
+}
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+	df := id.o.File()
+	filename := *df.Name
+	if df.importPath == g.file.importPath {
+		// Don't generate type aliases for files in the same Go package as this one.
+		return
+	}
+	if !supportTypeAliases {
+		g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
+	}
+	g.usedPackages[df.importPath] = true
+
+	for _, sym := range df.exported[id.o] {
+		sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
+	}
+
+	g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	ccPrefix := enum.prefix()
+
+	deprecatedEnum := ""
+	if enum.GetOptions().GetDeprecated() {
+		deprecatedEnum = deprecationComment
+	}
+	g.PrintComments(enum.path)
+	g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
+	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+	g.P("const (")
+	for i, e := range enum.Value {
+		etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
+		g.PrintComments(etorPath)
+
+		deprecatedValue := ""
+		if e.GetOptions().GetDeprecated() {
+			deprecatedValue = deprecationComment
+		}
+
+		name := ccPrefix + *e.Name
+		g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
+		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+	}
+	g.P(")")
+	g.P()
+	g.P("var ", ccTypeName, "_name = map[int32]string{")
+	generated := make(map[int32]bool) // avoid duplicate values
+	for _, e := range enum.Value {
+		duplicate := ""
+		if _, present := generated[*e.Number]; present {
+			duplicate = "// Duplicate value: "
+		}
+		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+		generated[*e.Number] = true
+	}
+	g.P("}")
+	g.P()
+	g.P("var ", ccTypeName, "_value = map[string]int32{")
+	for _, e := range enum.Value {
+		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+	}
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+		g.P("p := new(", ccTypeName, ")")
+		g.P("*p = x")
+		g.P("return p")
+		g.P("}")
+		g.P()
+	}
+
+	g.P("func (x ", ccTypeName, ") String() string {")
+	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+		g.P("if err != nil {")
+		g.P("return err")
+		g.P("}")
+		g.P("*x = ", ccTypeName, "(value)")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+	}
+
+	var indexes []string
+	for m := enum.parent; m != nil; m = m.parent {
+		// XXX: skip groups?
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	indexes = append(indexes, strconv.Itoa(enum.index))
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+		g.P()
+	}
+
+	g.generateEnumRegistration(enum)
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code.  The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field.
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
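+//
+// For example (hypothetical field), a proto2 declaration
+//	optional string name = 4 [default = "x"];
+// yields the tag "bytes,4,opt,name=name,def=x".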
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
+					defaultValue = fmt.Sprint(float32(f))
+				}
+			}
+		case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
+					defaultValue = fmt.Sprint(f)
+				}
+			}
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.GoPackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+		}
+		enum += CamelCaseSlice(obj.TypeName())
+	}
+	packed := ""
+	if (field.Options != nil && field.Options.GetPacked()) ||
+		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+			isRepeated(field) && isScalar(field)) {
+		packed = ",packed"
+	}
+	fieldName := field.GetName()
+	name := fieldName
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		// We must use the type name for groups instead of
+		// the field name to preserve capitalization.
+		// type_name in FieldDescriptorProto is fully-qualified,
+		// but we only want the local part.
+		name = *field.TypeName
+		if i := strings.LastIndex(name, "."); i >= 0 {
+			name = name[i+1:]
+		}
+	}
+	if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
+		// TODO: escaping might be needed, in which case
+		// perhaps this should be in its own "json" tag.
+		name += ",json=" + json
+	}
+	name = ",name=" + name
+	if message.proto3() {
+		name += ",proto3"
+	}
+	oneof := ""
+	if field.OneofIndex != nil {
+		oneof = ",oneof"
+	}
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+		wiretype,
+		field.GetNumber(),
+		optrepreq,
+		packed,
+		name,
+		enum,
+		oneof,
+		defaultValue))
+}
+
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
+	switch typ {
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return false
+	}
+	return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package, and the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
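+// For example, a repeated int32 field yields ("[]int32", "varint"); a proto2
+// optional string field yields ("*string", "bytes"), while a plain string
+// field in a proto3 message yields ("string", "bytes") because proto3 scalars
+// are not generated as pointers.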
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		typ, wire = "float64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		typ, wire = "float32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		typ, wire = "int64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		typ, wire = "uint64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		typ, wire = "int32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		typ, wire = "uint32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		typ, wire = "uint64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		typ, wire = "uint32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		typ, wire = "bool", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		typ, wire = "string", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "group"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		typ, wire = "[]byte", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "varint"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		typ, wire = "int32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		typ, wire = "int64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		typ, wire = "int32", "zigzag32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		typ, wire = "int64", "zigzag64"
+	default:
+		g.Fail("unknown type for", field.GetName())
+	}
+	if isRepeated(field) {
+		typ = "[]" + typ
+	} else if message != nil && message.proto3() {
+		return
+	} else if field.OneofIndex != nil && message != nil {
+		return
+	} else if needsStar(*field.Type) {
+		typ = "*" + typ
+	}
+	return
+}
+
+func (g *Generator) RecordTypeUse(t string) {
+	if _, ok := g.typeNameToObject[t]; !ok {
+		return
+	}
+	importPath := g.ObjectNamed(t).GoImportPath()
+	if importPath == g.outputImportPath {
+		// Don't record use of objects in our package.
+		return
+	}
+	g.AddImport(importPath)
+	g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated.  Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it is the built-in zero default or one explicitly set in
+// the source. It returns e.g. "nil", `""`, or "Default_MessageType_FieldName".
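+//
+// For example (hypothetical message and field), a proto2 field
+//	optional string greeting = 1 [default = "hi"];
+// in message Foo yields "Default_Foo_Greeting", a plain string field yields
+// `""`, and a repeated field yields "nil".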
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+	if isRepeated(field) {
+		return "nil"
+	}
+	if def := field.GetDefaultValue(); def != "" {
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+			return defaultConstant
+		}
+		return "append([]byte(nil), " + defaultConstant + "...)"
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "false"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return `""`
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "nil"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		obj := g.ObjectNamed(field.GetTypeName())
+		var enum *EnumDescriptor
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			// The enum type has been publicly imported.
+			enum, _ = id.o.(*EnumDescriptor)
+		} else {
+			enum, _ = obj.(*EnumDescriptor)
+		}
+		if enum == nil {
+			log.Printf("don't know how to generate getter for %s", field.GetName())
+			return "nil"
+		}
+		if len(enum.Value) == 0 {
+			return "0 // empty enum"
+		}
+		first := enum.Value[0].GetName()
+		return g.DefaultPackageName(obj) + enum.prefix() + first
+	default:
+		return "0"
+	}
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
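+//
+// For example (hypothetical names), defaultConstantName("Foo", "field_name")
+// returns "Default_Foo_FieldName".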
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant; a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - field containing list of subfields:
+// - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+	goName  string      // Go struct name of the message, e.g. MessageName
+	message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+	return f.protoName
+}
+
+// getGoType returns the Go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+	return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	star := ""
+	tname := f.goType
+	if needsStar(f.protoType) && tname[0] == '*' {
+		tname = tname[1:]
+		star = "*"
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+	if f.getterDef == "nil" { // Simpler getter
+		g.P("if m != nil {")
+		g.P("return m." + f.goName)
+		g.P("}")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	if mc.message.proto3() {
+		g.P("if m != nil {")
+	} else {
+		g.P("if m != nil && m." + f.goName + " != nil {")
+	}
+	g.P("return " + star + "m." + f.goName)
+	g.P("}")
+	g.P("return ", f.getterDef)
+	g.P("}")
+	g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	deprecated    string                               // Deprecation comment, if any.
+}
+
+// typedNil prints a nil casted to the pointer to this field.
+// - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field prints the discriminator interface and the wrapper types for the oneof,
+// and also prints the getters for all of its sub-fields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	// The discriminator type
+	g.P("type ", f.goType, " interface {")
+	g.P(f.goType, "()")
+	g.P("}")
+	g.P()
+	// The subField types, fulfilling the discriminator type contract
+	for _, sf := range f.subFields {
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+		g.P("}")
+		g.P()
+	}
+	for _, sf := range f.subFields {
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+		g.P()
+	}
+	// Getter for the oneof field
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+	g.P("if m != nil { return m.", f.goName, " }")
+	g.P("return nil")
+	g.P("}")
+	g.P()
+	// Getters for each oneof
+	for _, sf := range f.subFields {
+		if sf.deprecated != "" {
+			g.P(sf.deprecated)
+		}
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+		g.P("return x.", sf.goName)
+		g.P("}")
+		g.P("return ", sf.getterDef)
+		g.P("}")
+		g.P()
+	}
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field  as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is only if the default value
+// is explicitly set in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
+			switch def {
+			case "inf":
+				def = "math.Inf(1)"
+			case "-inf":
+				def = "math.Inf(-1)"
+			case "nan":
+				def = "math.NaN()"
+			}
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+				def = "float32(" + def + ")"
+			}
+			kind = "var "
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if f, err := strconv.ParseFloat(def, 32); err == nil {
+				def = fmt.Sprint(float32(f))
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if f, err := strconv.ParseFloat(def, 64); err == nil {
+				def = fmt.Sprint(f)
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// Must be an enum.  Need to construct the prefixed name.
+			obj := g.ObjectNamed(df.getProtoTypeName())
+			var enum *EnumDescriptor
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// The enum type has been publicly imported.
+				enum, _ = id.o.(*EnumDescriptor)
+			} else {
+				enum, _ = obj.(*EnumDescriptor)
+			}
+			if enum == nil {
+				log.Printf("don't know how to generate constant for %s", fieldname)
+				continue
+			}
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
+		}
+		g.P(kind, fieldname, " ", typename, " = ", def)
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+	}
+	g.P()
+}
+
+// generateInternalStructFields just adds the XXX_<something> fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+	if len(mc.message.ExtensionRange) > 0 {
+		messageset := ""
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			messageset = "protobuf_messageset:\"1\" "
+		}
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+	}
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
+
+}
+
+// generateOneofFuncs adds the oneof utility functions, currently just the XXX_OneofWrappers method used internally by the proto package.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+	ofields := []*oneofField{}
+	for _, f := range topLevelFields {
+		if o, ok := f.(*oneofField); ok {
+			ofields = append(ofields, o)
+		}
+	}
+	if len(ofields) == 0 {
+		return
+	}
+
+	// OneofFuncs
+	g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+	g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+	g.P("return []interface{}{")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.typedNil(g)
+		}
+	}
+	g.P("}")
+	g.P("}")
+	g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
+	var indexes []string
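+	// Walk up the parent chain, prepending each index so the slice runs from the outermost message to this one.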
+	for m := mc.message; m != nil; m = m.parent {
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
+	// if we change proto.MessageName to work with multiple equivalents.
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+		g.P()
+	}
+
+	// Extension support methods
+	if len(mc.message.ExtensionRange) > 0 {
+		g.P()
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+		for _, r := range mc.message.ExtensionRange {
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
+		}
+		g.P("}")
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+		g.P("return extRange_", mc.goName)
+		g.P("}")
+		g.P()
+	}
+
+	// TODO: It does not scale to keep adding another method for every
+	// operation on protos that we want to switch over to using the
+	// table-driven approach. Instead, we should only add a single method
+	// that allows getting access to the *InternalMessageInfo struct and then
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+	// Wrapper for table-driven marshaling and unmarshaling.
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+	g.P("}")
+
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+	g.P()
+}
+
+// Generate the type, methods and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+	topLevelFields := []topLevelField{}
+	oFields := make(map[int32]*oneofField)
+	// The full type name
+	typeName := message.TypeName()
+	// The full type name, CamelCased.
+	goTypeName := CamelCaseSlice(typeName)
+
+	usedNames := make(map[string]bool)
+	for _, n := range methodNames {
+		usedNames[n] = true
+	}
+
+	// allocNames finds a conflict-free variation of the given strings,
+	// consistently mutating their suffixes.
+	// It returns the same number of strings.
+	allocNames := func(ns ...string) []string {
+	Loop:
+		for {
+			for _, n := range ns {
+				if usedNames[n] {
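+					// A collision on any candidate bumps every name with a "_" suffix so the field and its getter stay consistent.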
+					for i := range ns {
+						ns[i] += "_"
+					}
+					continue Loop
+				}
+			}
+			for _, n := range ns {
+				usedNames[n] = true
+			}
+			return ns
+		}
+	}
+
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+	// Build a structure more suitable for generating the text in one pass
+	for i, field := range message.Field {
+		// Allocate the getter and the field at the same time so name
+		// collisions create field/method consistent names.
+		// TODO: This allocation occurs based on the order of the fields
+		// in the proto file, meaning that a change in the field
+		// ordering can change generated Method/Field names.
+		base := CamelCase(*field.Name)
+		ns := allocNames(base, "Get"+base)
+		fieldName, fieldGetterName := ns[0], ns[1]
+		typename, wiretype := g.GoType(message, field)
+		jsonName := *field.Name
+		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+		oneof := field.OneofIndex != nil
+		if oneof && oFields[*field.OneofIndex] == nil {
+			odp := message.OneofDecl[int(*field.OneofIndex)]
+			base := CamelCase(odp.GetName())
+			fname := allocNames(base)[0]
+
+			// This is the first field of a oneof we haven't seen before.
+			// Generate the union field.
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+			c, ok := g.makeComments(oneofFullPath)
+			if ok {
+				c += "\n//\n"
+			}
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
+			// Generate the rest of this comment later,
+			// when we've computed any disambiguation.
+
+			dname := "is" + goTypeName + "_" + fname
+			tag := `protobuf_oneof:"` + odp.GetName() + `"`
+			of := oneofField{
+				fieldCommon: fieldCommon{
+					goName:     fname,
+					getterName: "Get"+fname,
+					goType:     dname,
+					tags:       tag,
+					protoName:  odp.GetName(),
+					fullPath:   oneofFullPath,
+				},
+				comment: c,
+			}
+			topLevelFields = append(topLevelFields, &of)
+			oFields[*field.OneofIndex] = &of
+		}
+
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+			desc := g.ObjectNamed(field.GetTypeName())
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+				// Figure out the Go types and tags for the key and value types.
+				keyField, valField := d.Field[0], d.Field[1]
+				keyType, keyWire := g.GoType(d, keyField)
+				valType, valWire := g.GoType(d, valField)
+				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+				// We don't use stars, except for message-typed values.
+				// Message and enum types are the only two possibly foreign types used in maps,
+				// so record their use. They are not permitted as map keys.
+				keyType = strings.TrimPrefix(keyType, "*")
+				switch *valField.Type {
+				case descriptor.FieldDescriptorProto_TYPE_ENUM:
+					valType = strings.TrimPrefix(valType, "*")
+					g.RecordTypeUse(valField.GetTypeName())
+				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+					g.RecordTypeUse(valField.GetTypeName())
+				default:
+					valType = strings.TrimPrefix(valType, "*")
+				}
+
+				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+				mapFieldTypes[field] = typename // record for the getter generation
+
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+			}
+		}
+
+		fieldDeprecated := ""
+		if field.GetOptions().GetDeprecated() {
+			fieldDeprecated = deprecationComment
+		}
+
+		dvalue := g.getterDefault(field, goTypeName)
+		if oneof {
+			tname := goTypeName + "_" + fieldName
+			// It is possible for this to collide with a message or enum
+			// nested in this message. Check for collisions.
+			for {
+				ok := true
+				for _, desc := range message.nested {
+					if CamelCaseSlice(desc.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				for _, enum := range message.enums {
+					if CamelCaseSlice(enum.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				if !ok {
+					tname += "_"
+					continue
+				}
+				break
+			}
+
+			oneofField := oFields[*field.OneofIndex]
+			tag := "protobuf:" + g.goTag(message, field, wiretype)
+			sf := oneofSubField{
+				fieldCommon: fieldCommon{
+					goName:     fieldName,
+					getterName: fieldGetterName,
+					goType:     typename,
+					tags:       tag,
+					protoName:  field.GetName(),
+					fullPath:   fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
+				},
+				protoTypeName: field.GetTypeName(),
+				fieldNumber:   int(*field.Number),
+				protoType:     *field.Type,
+				getterDef:     dvalue,
+				protoDef:      field.GetDefaultValue(),
+				oneofTypeName: tname,
+				deprecated:    fieldDeprecated,
+			}
+			oneofField.subFields = append(oneofField.subFields, &sf)
+			g.RecordTypeUse(field.GetTypeName())
+			continue
+		}
+
+		fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
+		c, ok := g.makeComments(fieldFullPath)
+		if ok {
+			c += "\n"
+		}
+		rf := simpleField{
+			fieldCommon: fieldCommon{
+				goName:     fieldName,
+				getterName: fieldGetterName,
+				goType:     typename,
+				tags:       tag,
+				protoName:  field.GetName(),
+				fullPath:   fieldFullPath,
+			},
+			protoTypeName: field.GetTypeName(),
+			protoType:     *field.Type,
+			deprecated:    fieldDeprecated,
+			getterDef:     dvalue,
+			protoDef:      field.GetDefaultValue(),
+			comment:       c,
+		}
+		var pf topLevelField = &rf
+
+		topLevelFields = append(topLevelFields, pf)
+		g.RecordTypeUse(field.GetTypeName())
+	}
+
+	mc := &msgCtx{
+		goName:  goTypeName,
+		message: message,
+	}
+
+	g.generateMessageStruct(mc, topLevelFields)
+	g.P()
+	g.generateCommonMethods(mc)
+	g.P()
+	g.generateDefaultConstants(mc, topLevelFields)
+	g.P()
+	g.generateGetters(mc, topLevelFields)
+	g.P()
+	g.generateSetters(mc, topLevelFields)
+	g.P()
+	g.generateOneofFuncs(mc, topLevelFields)
+	g.P()
+
+	var oneofTypes []string
+	for _, f := range topLevelFields {
+		if of, ok := f.(*oneofField); ok {
+			for _, osf := range of.subFields {
+				oneofTypes = append(oneofTypes, osf.oneofTypeName)
+			}
+		}
+	}
+
+	opts := message.Options
+	ms := &messageSymbol{
+		sym:           goTypeName,
+		hasExtensions: len(message.ExtensionRange) > 0,
+		isMessageSet:  opts != nil && opts.GetMessageSetWireFormat(),
+		oneofTypes:    oneofTypes,
+	}
+	g.file.addExport(message, ms)
+
+	for _, ext := range message.ext {
+		g.generateExtension(ext)
+	}
+
+	fullName := strings.Join(message.TypeName(), ".")
+	if g.file.Package != nil {
+		fullName = *g.file.Package + "." + fullName
+	}
+
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+	// Register types for native map types.
+	for _, k := range mapFieldKeys(mapFieldTypes) {
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+	}
+
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int           { return len(a) }
+func (a byTypeName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Sort(byTypeName(keys))
+	return keys
+}
+
+var escapeChars = [256]byte{
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+	// single and double quotes, but strconv.Unquote only allows one or the
+	// other (based on actual surrounding quotes of its input argument).
+
+	var out []byte
+	for len(s) > 0 {
+		// regular character, or too short to be a valid escape
+		if s[0] != '\\' || len(s) < 2 {
+			out = append(out, s[0])
+			s = s[1:]
+		} else if c := escapeChars[s[1]]; c != 0 {
+			// escape sequence
+			out = append(out, c)
+			s = s[2:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g., "\x80"
+			if len(s) < 4 {
+				// too short to be valid
+				out = append(out, s[:2]...)
+				s = s[2:]
+				continue
+			}
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
+			if err != nil {
+				out = append(out, s[:4]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[4:]
+		} else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+			// so consume up to 2 more bytes or up to end-of-string
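+			// n counts the octal digits that immediately follow the backslash; it is capped at 3 below.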
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+			if n > 3 {
+				n = 3
+			}
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[1+n:]
+		} else {
+			// bad escape, just propagate the slash as-is
+			out = append(out, s[0])
+			s = s[1:]
+		}
+	}
+
+	return string(out)
+}
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+	ccTypeName := ext.DescName()
+
+	extObj := g.ObjectNamed(*ext.Extendee)
+	var extDesc *Descriptor
+	if id, ok := extObj.(*ImportedDescriptor); ok {
+		// This is extending a publicly imported message.
+		// We need the underlying type for goTag.
+		extDesc = id.o.(*Descriptor)
+	} else {
+		extDesc = extObj.(*Descriptor)
+	}
+	extendedType := "*" + g.TypeName(extObj) // always use the original
+	field := ext.FieldDescriptorProto
+	fieldType, wireType := g.GoType(ext.parent, field)
+	tag := g.goTag(extDesc, field, wireType)
+	g.RecordTypeUse(*ext.Extendee)
+	if n := ext.FieldDescriptorProto.TypeName; n != nil {
+		// foreign extension type
+		g.RecordTypeUse(*n)
+	}
+
+	typeName := ext.TypeName()
+
+	// Special case for proto2 message sets: If this extension is extending
+	// proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+	// then drop that last component.
+	//
+	// TODO: This should be implemented in the text formatter rather than the generator.
+	// In addition, the situation for when to apply this special case is implemented
+	// differently in other languages:
+	// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+	if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+		typeName = typeName[:len(typeName)-1]
+	}
+
+	// For text formatting, the package must be exactly what the .proto file declares,
+	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+	extName := strings.Join(typeName, ".")
+	if g.file.Package != nil {
+		extName = *g.file.Package + "." + extName
+	}
+
+	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+	g.P("ExtendedType: (", extendedType, ")(nil),")
+	g.P("ExtensionType: (", fieldType, ")(nil),")
+	g.P("Field: ", field.Number, ",")
+	g.P(`Name: "`, extName, `",`)
+	g.P("Tag: ", tag, ",")
+	g.P(`Filename: "`, g.file.GetName(), `",`)
+
+	g.P("}")
+	g.P()
+
+	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+
+	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+	if len(g.init) == 0 {
+		return
+	}
+	g.P("func init() {")
+	for _, l := range g.init {
+		g.P(l)
+	}
+	g.P("}")
+	g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+	// Make a copy and trim source_code_info data.
+	// TODO: Trim this more when we know exactly what we need.
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+	pb.SourceCodeInfo = nil
+
+	b, err := proto.Marshal(pb)
+	if err != nil {
+		g.Fail(err.Error())
+	}
+
+	var buf bytes.Buffer
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+	w.Write(b)
+	w.Close()
+	b = buf.Bytes()
+
+	v := file.VarName()
+	g.P()
+	g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+	g.P("var ", v, " = []byte{")
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
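+	// Emit the compressed descriptor as hex byte literals, 16 bytes per generated line.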
+	for len(b) > 0 {
+		n := 16
+		if n > len(b) {
+			n = len(b)
+		}
+
+		s := ""
+		for _, c := range b[:n] {
+			s += fmt.Sprintf("0x%02x,", c)
+		}
+		g.P(s)
+
+		b = b[n:]
+	}
+	g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+	pkg := enum.File().GetPackage()
+	if pkg != "" {
+		pkg += "."
+	}
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
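+	// %[3]s reuses the third argument (ccTypeName) for both the generated _name and _value map identifiers.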
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+	return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+	return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent - since the
+// C++ generator lowercases names, it's extremely unlikely to have two fields
+// with different capitalizations.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
+func CamelCase(s string) string {
+	if s == "" {
+		return ""
+	}
+	t := make([]byte, 0, 32)
+	i := 0
+	if s[0] == '_' {
+		// Need a capital letter; drop the '_'.
+		t = append(t, 'X')
+		i++
+	}
+	// Invariant: if the next letter is lower case, it must be converted
+	// to upper case.
+	// That is, we process a word at a time, where words are marked by _ or
+	// upper case letter. Digits are treated as words.
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+			continue // Skip the underscore in s.
+		}
+		if isASCIIDigit(c) {
+			t = append(t, c)
+			continue
+		}
+		// Assume we have a letter now - if not, it's a bogus identifier.
+		// The next word is a sequence of characters that must start upper case.
+		if isASCIILower(c) {
+			c ^= ' ' // Make it a capital letter.
+		}
+		t = append(t, c) // Guaranteed not lower case.
+		// Accept lower case sequence that follows.
+		for i+1 < len(s) && isASCIILower(s[i+1]) {
+			i++
+			t = append(t, s[i])
+		}
+	}
+	return string(t)
+}
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
+
+// dottedSlice turns a sliced name into a dotted name.
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
+
+// Is this field optional?
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+// Is this field required?
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// Is this field repeated?
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// Is this field a scalar numeric type?
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
+
+// badToUnderscore is the mapping function used to generate Go names from package names,
+// which can be dotted in the input .proto file.  It replaces non-identifier characters such as
+// dot or dash with underscore.
+func badToUnderscore(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+		return r
+	}
+	return '_'
+}
+
+// baseName returns the last path element of the name, with the last dotted suffix removed.
+func baseName(name string) string {
+	// First, find the last element
+	if i := strings.LastIndex(name, "/"); i >= 0 {
+		name = name[i+1:]
+	}
+	// Now drop the suffix
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		name = name[0:i]
+	}
+	return name
+}
+
+// The SourceCodeInfo message describes the location of elements of a parsed
+// .proto file by way of a "path", which is a sequence of integers that
+// describe the route from a FileDescriptorProto to the relevant submessage.
+// The path alternates between a field number of a repeated field, and an index
+// into that repeated field. The constants below define the field numbers that
+// are used.
+//
+// See descriptor.proto for more information about this.
+const (
+	// tag numbers in FileDescriptorProto
+	packagePath = 2 // package
+	messagePath = 4 // message_type
+	enumPath    = 5 // enum_type
+	// tag numbers in DescriptorProto
+	messageFieldPath   = 2 // field
+	messageMessagePath = 3 // nested_type
+	messageEnumPath    = 4 // enum_type
+	messageOneofPath   = 8 // oneof_decl
+	// tag numbers in EnumDescriptorProto
+	enumValuePath = 2 // value
+)
+
+var supportTypeAliases bool
+
+func init() {
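+	// Type aliases, used when generating public imports, are only available from Go 1.9 onward.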
+	for _, tag := range build.Default.ReleaseTags {
+		if tag == "go1.9" {
+			supportTypeAliases = true
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
new file mode 100644
index 0000000..a9b6103
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
@@ -0,0 +1,117 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output.  An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct {
+	pos, end int
+	token.Token
+}
+
+func tokenize(src []byte) []tokinfo {
+	fs := token.NewFileSet()
+	var s scanner.Scanner
+	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
+	var info []tokinfo
+	for {
+		pos, next, lit := s.Scan()
+		switch next {
+		case token.SEMICOLON:
+			continue
+		}
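+		// token.Pos values are 1-based within the FileSet, so subtract 1 to record 0-based byte offsets.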
+		info = append(info, tokinfo{
+			pos:   int(pos - 1),
+			end:   int(pos + token.Pos(len(lit)) - 1),
+			Token: next,
+		})
+		if next == token.EOF {
+			break
+		}
+	}
+	return info
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
new file mode 100644
index 0000000..5d1e3f0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
@@ -0,0 +1,537 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package grpc outputs gRPC service descriptions in Go code.
+// It runs as a plugin for the Go protocol buffer compiler.
+// It is linked in to protoc-gen-go.
+package grpc
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// the grpc package is introduced; the generated code references
+// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 4
+
+// Paths for packages used by code generated in this file,
+// relative to the import_prefix of the generator.Generator.
+const (
+	contextPkgPath = "context"
+	grpcPkgPath    = "google.golang.org/grpc"
+	codePkgPath    = "google.golang.org/grpc/codes"
+	statusPkgPath  = "google.golang.org/grpc/status"
+)
+
+func init() {
+	generator.RegisterPlugin(new(grpc))
+}
+
+// grpc is an implementation of the Go protocol buffer compiler's
+// plugin architecture.  It generates bindings for gRPC support.
+type grpc struct {
+	gen *generator.Generator
+}
+
+// Name returns the name of this plugin, "grpc".
+func (g *grpc) Name() string {
+	return "grpc"
+}
+
+// The names for packages imported in the generated code.
+// They may vary from the final path component of the import path
+// if the name is used by other packages.
+var (
+	contextPkg string
+	grpcPkg    string
+)
+
+// Init initializes the plugin.
+func (g *grpc) Init(gen *generator.Generator) {
+	g.gen = gen
+}
+
+// Given a type name defined in a .proto, return its object.
+// Also record that we're using it, to guarantee the associated import.
+func (g *grpc) objectNamed(name string) generator.Object {
+	g.gen.RecordTypeUse(name)
+	return g.gen.ObjectNamed(name)
+}
+
+// Given a type name defined in a .proto, return its name as we will print it.
+func (g *grpc) typeName(str string) string {
+	return g.gen.TypeName(g.objectNamed(str))
+}
+
+// P forwards to g.gen.P.
+func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
+
+// Generate generates code for the services in the given file.
+func (g *grpc) Generate(file *generator.FileDescriptor) {
+	if len(file.FileDescriptorProto.Service) == 0 {
+		return
+	}
+
+	contextPkg = string(g.gen.AddImport(contextPkgPath))
+	grpcPkg = string(g.gen.AddImport(grpcPkgPath))
+
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ ", contextPkg, ".Context")
+	g.P("var _ ", grpcPkg, ".ClientConn")
+	g.P()
+
+	// Assert version compatibility.
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the grpc package it is being compiled against.")
+	g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
+	g.P()
+
+	for i, service := range file.FileDescriptorProto.Service {
+		g.generateService(file, service, i)
+	}
+}
+
+// GenerateImports generates the import declaration for this file.
+func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
+}
+
+// reservedClientName records whether a client name is reserved on the client side.
+var reservedClientName = map[string]bool{
+	// TODO: do we need any in gRPC?
+}
+
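+// unexport lowercases the first character of s to produce an unexported identifier.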
+func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// generateService generates all the code for the named service.
+func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
+	path := fmt.Sprintf("6,%d", index) // 6 means service.
+
+	origServName := service.GetName()
+	fullServName := origServName
+	if pkg := file.GetPackage(); pkg != "" {
+		fullServName = pkg + "." + fullServName
+	}
+	servName := generator.CamelCase(origServName)
+	deprecated := service.GetOptions().GetDeprecated()
+
+	g.P()
+	g.P(fmt.Sprintf(`// %sClient is the client API for %s service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName))
+
+	// Client interface.
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", servName, "Client interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateClientSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Client structure.
+	g.P("type ", unexport(servName), "Client struct {")
+	g.P("cc *", grpcPkg, ".ClientConn")
+	g.P("}")
+	g.P()
+
+	// NewClient factory.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
+	g.P("return &", unexport(servName), "Client{cc}")
+	g.P("}")
+	g.P()
+
+	var methodIndex, streamIndex int
+	serviceDescVar := "_" + servName + "_serviceDesc"
+	// Client method implementations.
+	for _, method := range service.Method {
+		var descExpr string
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
+			// Unary RPC method
+			descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
+			methodIndex++
+		} else {
+			// Streaming RPC method
+			descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
+			streamIndex++
+		}
+		g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
+	}
+
+	// Server interface.
+	serverType := servName + "Server"
+	g.P("// ", serverType, " is the server API for ", servName, " service.")
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", serverType, " interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateServerSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Server Unimplemented struct for forward compatibility.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.generateUnimplementedServer(servName, service)
+
+	// Server registration.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
+	g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
+	g.P("}")
+	g.P()
+
+	// Server handler implementations.
+	var handlerNames []string
+	for _, method := range service.Method {
+		hname := g.generateServerMethod(servName, fullServName, method)
+		handlerNames = append(handlerNames, hname)
+	}
+
+	// Service descriptor.
+	g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
+	g.P("ServiceName: ", strconv.Quote(fullServName), ",")
+	g.P("HandlerType: (*", serverType, ")(nil),")
+	g.P("Methods: []", grpcPkg, ".MethodDesc{")
+	for i, method := range service.Method {
+		if method.GetServerStreaming() || method.GetClientStreaming() {
+			continue
+		}
+		g.P("{")
+		g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
+		g.P("Handler: ", handlerNames[i], ",")
+		g.P("},")
+	}
+	g.P("},")
+	g.P("Streams: []", grpcPkg, ".StreamDesc{")
+	for i, method := range service.Method {
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
+			continue
+		}
+		g.P("{")
+		g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
+		g.P("Handler: ", handlerNames[i], ",")
+		if method.GetServerStreaming() {
+			g.P("ServerStreams: true,")
+		}
+		if method.GetClientStreaming() {
+			g.P("ClientStreams: true,")
+		}
+		g.P("},")
+	}
+	g.P("},")
+	g.P("Metadata: \"", file.GetName(), "\",")
+	g.P("}")
+	g.P()
+}
+
+// generateUnimplementedServer creates the unimplemented server struct
+func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) {
+	serverType := servName + "Server"
+	g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.")
+	g.P("type Unimplemented", serverType, " struct {")
+	g.P("}")
+	g.P()
+	// Unimplemented<service_name>Server's concrete methods
+	for _, method := range service.Method {
+		g.generateServerMethodConcrete(servName, method)
+	}
+	g.P()
+}
+
+// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility
+func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) {
+	header := g.generateServerSignatureWithParamNames(servName, method)
+	g.P("func (*Unimplemented", servName, "Server) ", header, " {")
+	var nilArg string
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		nilArg = "nil, "
+	}
+	methName := generator.CamelCase(method.GetName())
+	statusPkg := string(g.gen.AddImport(statusPkgPath))
+	codePkg := string(g.gen.AddImport(codePkgPath))
+	g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`)
+	g.P("}")
+}
+
+// generateClientSignature returns the client-side signature for a method.
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
+	origMethName := method.GetName()
+	methName := generator.CamelCase(origMethName)
+	if reservedClientName[methName] {
+		methName += "_"
+	}
+	reqArg := ", in *" + g.typeName(method.GetInputType())
+	if method.GetClientStreaming() {
+		reqArg = ""
+	}
+	respName := "*" + g.typeName(method.GetOutputType())
+	if method.GetServerStreaming() || method.GetClientStreaming() {
+		respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
+	}
+	return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
+}
+
+func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
+	sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
+	methName := generator.CamelCase(method.GetName())
+	inType := g.typeName(method.GetInputType())
+	outType := g.typeName(method.GetOutputType())
+
+	if method.GetOptions().GetDeprecated() {
+		g.P(deprecationComment)
+	}
+	g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		g.P("out := new(", outType, ")")
+		// TODO: Pass descExpr to Invoke.
+		g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
+		g.P("if err != nil { return nil, err }")
+		g.P("return out, nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	streamType := unexport(servName) + methName + "Client"
+	g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`)
+	g.P("if err != nil { return nil, err }")
+	g.P("x := &", streamType, "{stream}")
+	if !method.GetClientStreaming() {
+		g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+	}
+	g.P("return x, nil")
+	g.P("}")
+	g.P()
+
+	genSend := method.GetClientStreaming()
+	genRecv := method.GetServerStreaming()
+	genCloseAndRecv := !method.GetServerStreaming()
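+	// Client streaming adds Send, server streaming adds Recv, and client-stream-only RPCs get CloseAndRecv.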
+
+	// Stream auxiliary types and methods.
+	g.P("type ", servName, "_", methName, "Client interface {")
+	if genSend {
+		g.P("Send(*", inType, ") error")
+	}
+	if genRecv {
+		g.P("Recv() (*", outType, ", error)")
+	}
+	if genCloseAndRecv {
+		g.P("CloseAndRecv() (*", outType, ", error)")
+	}
+	g.P(grpcPkg, ".ClientStream")
+	g.P("}")
+	g.P()
+
+	g.P("type ", streamType, " struct {")
+	g.P(grpcPkg, ".ClientStream")
+	g.P("}")
+	g.P()
+
+	if genSend {
+		g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
+		g.P("return x.ClientStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genRecv {
+		g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
+		g.P("m := new(", outType, ")")
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+	if genCloseAndRecv {
+		g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+		g.P("m := new(", outType, ")")
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+}
+
+// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names.
+func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string {
+	origMethName := method.GetName()
+	methName := generator.CamelCase(origMethName)
+	if reservedClientName[methName] {
+		methName += "_"
+	}
+
+	var reqArgs []string
+	ret := "error"
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, "ctx "+contextPkg+".Context")
+		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
+	}
+	if !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType()))
+	}
+	if method.GetServerStreaming() || method.GetClientStreaming() {
+		reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server")
+	}
+
+	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
+}
+
+// generateServerSignature returns the server-side signature for a method.
+func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
+	origMethName := method.GetName()
+	methName := generator.CamelCase(origMethName)
+	if reservedClientName[methName] {
+		methName += "_"
+	}
+
+	var reqArgs []string
+	ret := "error"
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, contextPkg+".Context")
+		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
+	}
+	if !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
+	}
+	if method.GetServerStreaming() || method.GetClientStreaming() {
+		reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
+	}
+
+	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
+}
+
+func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
+	methName := generator.CamelCase(method.GetName())
+	hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
+	inType := g.typeName(method.GetInputType())
+	outType := g.typeName(method.GetOutputType())
+
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
+		g.P("in := new(", inType, ")")
+		g.P("if err := dec(in); err != nil { return nil, err }")
+		g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
+		g.P("info := &", grpcPkg, ".UnaryServerInfo{")
+		g.P("Server: srv,")
+		g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
+		g.P("}")
+		g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
+		g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
+		g.P("}")
+		g.P("return interceptor(ctx, in, info, handler)")
+		g.P("}")
+		g.P()
+		return hname
+	}
+	streamType := unexport(servName) + methName + "Server"
+	g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
+	if !method.GetClientStreaming() {
+		g.P("m := new(", inType, ")")
+		g.P("if err := stream.RecvMsg(m); err != nil { return err }")
+		g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
+	} else {
+		g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
+	}
+	g.P("}")
+	g.P()
+
+	genSend := method.GetServerStreaming()
+	genSendAndClose := !method.GetServerStreaming()
+	genRecv := method.GetClientStreaming()
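+	// Server streaming adds Send, non-streaming responses get SendAndClose, and client streaming adds Recv.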
+
+	// Stream auxiliary types and methods.
+	g.P("type ", servName, "_", methName, "Server interface {")
+	if genSend {
+		g.P("Send(*", outType, ") error")
+	}
+	if genSendAndClose {
+		g.P("SendAndClose(*", outType, ") error")
+	}
+	if genRecv {
+		g.P("Recv() (*", inType, ", error)")
+	}
+	g.P(grpcPkg, ".ServerStream")
+	g.P("}")
+	g.P()
+
+	g.P("type ", streamType, " struct {")
+	g.P(grpcPkg, ".ServerStream")
+	g.P("}")
+	g.P()
+
+	if genSend {
+		g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
+		g.P("return x.ServerStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genSendAndClose {
+		g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
+		g.P("return x.ServerStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genRecv {
+		g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
+		g.P("m := new(", inType, ")")
+		g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+
+	return hname
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
new file mode 100644
index 0000000..532a550
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
@@ -0,0 +1,34 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package main
+
+import _ "github.com/golang/protobuf/protoc-gen-go/grpc"
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
new file mode 100644
index 0000000..8e2486d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
@@ -0,0 +1,98 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
+// Go code.  Run it by building this program and putting it in your path with
+// the name
+// 	protoc-gen-go
+// That word 'go' at the end becomes part of the option string set for the
+// protocol compiler, so once the protocol compiler (protoc) is installed
+// you can run
+// 	protoc --go_out=output_directory input_directory/file.proto
+// to generate Go bindings for the protocol defined by file.proto.
+// With that input, the output will be written to
+// 	output_directory/file.pb.go
+//
+// The generated code is documented in the package comment for
+// the library.
+//
+// See the README and documentation for protocol buffers to learn more:
+// 	https://developers.google.com/protocol-buffers/
+package main
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+func main() {
+	// Begin by allocating a generator. The request and response structures are stored there
+	// so we can do error handling easily - the response structure contains the field to
+	// report failure.
+	g := generator.New()
+
+	data, err := ioutil.ReadAll(os.Stdin)
+	if err != nil {
+		g.Error(err, "reading input")
+	}
+
+	if err := proto.Unmarshal(data, g.Request); err != nil {
+		g.Error(err, "parsing input proto")
+	}
+
+	if len(g.Request.FileToGenerate) == 0 {
+		g.Fail("no files to generate")
+	}
+
+	g.CommandLineParameters(g.Request.GetParameter())
+
+	// Create a wrapped version of the Descriptors and EnumDescriptors that
+	// point to the file that defines them.
+	g.WrapTypes()
+
+	g.SetPackageNames()
+	g.BuildTypeNameMap()
+
+	g.GenerateAllFiles()
+
+	// Send back the results.
+	data, err = proto.Marshal(g.Response)
+	if err != nil {
+		g.Error(err, "failed to marshal output proto")
+	}
+	_, err = os.Stdout.Write(data)
+	if err != nil {
+		g.Error(err, "failed to write output proto")
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
new file mode 100644
index 0000000..61bfc10
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
@@ -0,0 +1,369 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+/*
+Package plugin_go is a generated protocol buffer package.
+
+It is generated from these files:
+	google/protobuf/compiler/plugin.proto
+
+It has these top-level messages:
+	Version
+	CodeGeneratorRequest
+	CodeGeneratorResponse
+*/
+package plugin_go
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of protocol compiler.
+type Version struct {
+	Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+	Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+	Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+	// be empty for mainline stable releases.
+	Suffix               *string  `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Version) Reset()                    { *m = Version{} }
+func (m *Version) String() string            { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage()               {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Version) Unmarshal(b []byte) error {
+	return xxx_messageInfo_Version.Unmarshal(m, b)
+}
+func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+}
+func (dst *Version) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Version.Merge(dst, src)
+}
+func (m *Version) XXX_Size() int {
+	return xxx_messageInfo_Version.Size(m)
+}
+func (m *Version) XXX_DiscardUnknown() {
+	xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func (m *Version) GetMajor() int32 {
+	if m != nil && m.Major != nil {
+		return *m.Major
+	}
+	return 0
+}
+
+func (m *Version) GetMinor() int32 {
+	if m != nil && m.Minor != nil {
+		return *m.Minor
+	}
+	return 0
+}
+
+func (m *Version) GetPatch() int32 {
+	if m != nil && m.Patch != nil {
+		return *m.Patch
+	}
+	return 0
+}
+
+func (m *Version) GetSuffix() string {
+	if m != nil && m.Suffix != nil {
+		return *m.Suffix
+	}
+	return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+	// The .proto files that were explicitly listed on the command-line.  The
+	// code generator should generate code only for these files.  Each file's
+	// descriptor will be included in proto_file, below.
+	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+	// The generator parameter passed on the command-line.
+	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	// FileDescriptorProtos for all files in files_to_generate and everything
+	// they import.  The files will appear in topological order, so each file
+	// appears before any file that imports it.
+	//
+	// protoc guarantees that all proto_files will be written after
+	// the fields above, even though this is not technically guaranteed by the
+	// protobuf wire format.  This theoretically could allow a plugin to stream
+	// in the FileDescriptorProtos and handle them one by one rather than read
+	// the entire set into memory at once.  However, as of this writing, this
+	// is not similarly optimized on protoc's end -- it will store all fields in
+	// memory at once before sending them to the plugin.
+	//
+	// Type names of fields and extensions in the FileDescriptorProto are always
+	// fully qualified.
+	ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+	// The version number of protocol compiler.
+	CompilerVersion      *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
+func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorRequest) ProtoMessage()               {}
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
+}
+func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
+}
+func (m *CodeGeneratorRequest) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorRequest.Size(m)
+}
+func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
+
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
+	if m != nil {
+		return m.FileToGenerate
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetParameter() string {
+	if m != nil && m.Parameter != nil {
+		return *m.Parameter
+	}
+	return ""
+}
+
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
+	if m != nil {
+		return m.ProtoFile
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
+	if m != nil {
+		return m.CompilerVersion
+	}
+	return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+	// Error message.  If non-empty, code generation failed.  The plugin process
+	// should exit with status code zero even if it reports an error in this way.
+	//
+	// This should be used to indicate errors in .proto files which prevent the
+	// code generator from generating correct code.  Errors which indicate a
+	// problem in protoc itself -- such as the input CodeGeneratorRequest being
+	// unparseable -- should be reported by writing a message to stderr and
+	// exiting with a non-zero status code.
+	Error                *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File                 []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
+func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse) ProtoMessage()               {}
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse.Size(m)
+}
+func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse) GetError() string {
+	if m != nil && m.Error != nil {
+		return *m.Error
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+	// The file name, relative to the output directory.  The name must not
+	// contain "." or ".." components and must be relative, not be absolute (so,
+	// the file cannot lie outside the output directory).  "/" must be used as
+	// the path separator, not "\".
+	//
+	// If the name is omitted, the content will be appended to the previous
+	// file.  This allows the generator to break large files into small chunks,
+	// and allows the generated text to be streamed back to protoc so that large
+	// files need not reside completely in memory at one time.  Note that as of
+	// this writing protoc does not optimize for this -- it will read the entire
+	// CodeGeneratorResponse before writing files to disk.
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// If non-empty, indicates that the named file should already exist, and the
+	// content here is to be inserted into that file at a defined insertion
+	// point.  This feature allows a code generator to extend the output
+	// produced by another code generator.  The original generator may provide
+	// insertion points by placing special annotations in the file that look
+	// like:
+	//   @@protoc_insertion_point(NAME)
+	// The annotation can have arbitrary text before and after it on the line,
+	// which allows it to be placed in a comment.  NAME should be replaced with
+	// an identifier naming the point -- this is what other generators will use
+	// as the insertion_point.  Code inserted at this point will be placed
+	// immediately above the line containing the insertion point (thus multiple
+	// insertions to the same point will come out in the order they were added).
+	// The double-@ is intended to make it unlikely that the generated code
+	// could contain things that look like insertion points by accident.
+	//
+	// For example, the C++ code generator places the following line in the
+	// .pb.h files that it generates:
+	//   // @@protoc_insertion_point(namespace_scope)
+	// This line appears within the scope of the file's package namespace, but
+	// outside of any particular class.  Another plugin can then specify the
+	// insertion_point "namespace_scope" to generate additional classes or
+	// other declarations that should be placed in this scope.
+	//
+	// Note that if the line containing the insertion point begins with
+	// whitespace, the same whitespace will be added to every line of the
+	// inserted text.  This is useful for languages like Python, where
+	// indentation matters.  In these languages, the insertion point comment
+	// should be indented the same amount as any inserted code will need to be
+	// in order to work correctly in that context.
+	//
+	// The code generator that generates the initial file and the one which
+	// inserts into it must both run as part of a single invocation of protoc.
+	// Code generators are executed in the order in which they appear on the
+	// command line.
+	//
+	// If |insertion_point| is present, |name| must also be present.
+	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+	// The file contents.
+	Content              *string  `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorResponse_File) Reset()                    { *m = CodeGeneratorResponse_File{} }
+func (m *CodeGeneratorResponse_File) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse_File) ProtoMessage()               {}
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse_File) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
+}
+func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse_File) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if m != nil && m.InsertionPoint != nil {
+		return *m.InsertionPoint
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetContent() string {
+	if m != nil && m.Content != nil {
+		return *m.Content
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
+	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
+	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
+	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
+}
+
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
+	0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
+	0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
+	0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
+	0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
+	0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
+	0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
+	0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
+	0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
+	0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
+	0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
+	0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
+	0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
+	0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
+	0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
+	0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
+	0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
+	0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
+	0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
+	0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
+	0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
+	0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
+	0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
+	0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
+	0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
+	0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
+	0x00,
+}
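
The generated types above implement the protoc plugin protocol described in their comments: a plugin reads a serialized CodeGeneratorRequest from stdin and writes a serialized CodeGeneratorResponse to stdout. A minimal sketch of such a plugin follows (hypothetical code, not part of the vendored sources; the output file names and contents are illustrative only):

package main

import (
	"io/ioutil"
	"log"
	"os"

	proto "github.com/golang/protobuf/proto"
	plugin_go "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc hands the plugin a serialized CodeGeneratorRequest on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin_go.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one (illustrative) output file per file named on the command line.
	resp := &plugin_go.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin_go.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// The serialized CodeGeneratorResponse goes back to protoc on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}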
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 0000000..8953d0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+	XXX_unrecognized []byte                                 `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage()       {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+	if this != nil && this.Parameter != nil {
+		return *this.Parameter
+	}
+	return ""
+}
+
+type CodeGeneratorResponse struct {
+	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+	if this != nil && this.Error != nil {
+		return *this.Error
+	}
+	return ""
+}
+
+type CodeGeneratorResponse_File struct {
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+	if this != nil && this.Name != nil {
+		return *this.Name
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if this != nil && this.InsertionPoint != nil {
+		return *this.InsertionPoint
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+	if this != nil && this.Content != nil {
+		return *this.Content
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to
+//   change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins.  A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path.  The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+  optional int32 major = 1;
+  optional int32 minor = 2;
+  optional int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+  // The .proto files that were explicitly listed on the command-line.  The
+  // code generator should generate code only for these files.  Each file's
+  // descriptor will be included in proto_file, below.
+  repeated string file_to_generate = 1;
+
+  // The generator parameter passed on the command-line.
+  optional string parameter = 2;
+
+  // FileDescriptorProtos for all files in files_to_generate and everything
+  // they import.  The files will appear in topological order, so each file
+  // appears before any file that imports it.
+  //
+  // protoc guarantees that all proto_files will be written after
+  // the fields above, even though this is not technically guaranteed by the
+  // protobuf wire format.  This theoretically could allow a plugin to stream
+  // in the FileDescriptorProtos and handle them one by one rather than read
+  // the entire set into memory at once.  However, as of this writing, this
+  // is not similarly optimized on protoc's end -- it will store all fields in
+  // memory at once before sending them to the plugin.
+  //
+  // Type names of fields and extensions in the FileDescriptorProto are always
+  // fully qualified.
+  repeated FileDescriptorProto proto_file = 15;
+
+  // The version number of protocol compiler.
+  optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+  // Error message.  If non-empty, code generation failed.  The plugin process
+  // should exit with status code zero even if it reports an error in this way.
+  //
+  // This should be used to indicate errors in .proto files which prevent the
+  // code generator from generating correct code.  Errors which indicate a
+  // problem in protoc itself -- such as the input CodeGeneratorRequest being
+  // unparseable -- should be reported by writing a message to stderr and
+  // exiting with a non-zero status code.
+  optional string error = 1;
+
+  // Represents a single generated file.
+  message File {
+    // The file name, relative to the output directory.  The name must not
+    // contain "." or ".." components and must be relative, not be absolute (so,
+    // the file cannot lie outside the output directory).  "/" must be used as
+    // the path separator, not "\".
+    //
+    // If the name is omitted, the content will be appended to the previous
+    // file.  This allows the generator to break large files into small chunks,
+    // and allows the generated text to be streamed back to protoc so that large
+    // files need not reside completely in memory at one time.  Note that as of
+    // this writing protoc does not optimize for this -- it will read the entire
+    // CodeGeneratorResponse before writing files to disk.
+    optional string name = 1;
+
+    // If non-empty, indicates that the named file should already exist, and the
+    // content here is to be inserted into that file at a defined insertion
+    // point.  This feature allows a code generator to extend the output
+    // produced by another code generator.  The original generator may provide
+    // insertion points by placing special annotations in the file that look
+    // like:
+    //   @@protoc_insertion_point(NAME)
+    // The annotation can have arbitrary text before and after it on the line,
+    // which allows it to be placed in a comment.  NAME should be replaced with
+    // an identifier naming the point -- this is what other generators will use
+    // as the insertion_point.  Code inserted at this point will be placed
+    // immediately above the line containing the insertion point (thus multiple
+    // insertions to the same point will come out in the order they were added).
+    // The double-@ is intended to make it unlikely that the generated code
+    // could contain things that look like insertion points by accident.
+    //
+    // For example, the C++ code generator places the following line in the
+    // .pb.h files that it generates:
+    //   // @@protoc_insertion_point(namespace_scope)
+    // This line appears within the scope of the file's package namespace, but
+    // outside of any particular class.  Another plugin can then specify the
+    // insertion_point "namespace_scope" to generate additional classes or
+    // other declarations that should be placed in this scope.
+    //
+    // Note that if the line containing the insertion point begins with
+    // whitespace, the same whitespace will be added to every line of the
+    // inserted text.  This is useful for languages like Python, where
+    // indentation matters.  In these languages, the insertion point comment
+    // should be indented the same amount as any inserted code will need to be
+    // in order to work correctly in that context.
+    //
+    // The code generator that generates the initial file and the one which
+    // inserts into it must both run as part of a single invocation of protoc.
+    // Code generators are executed in the order in which they appear on the
+    // command line.
+    //
+    // If |insertion_point| is present, |name| must also be present.
+    optional string insertion_point = 2;
+
+    // The file contents.
+    optional string content = 15;
+  }
+  repeated File file = 15;
+}
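
A hypothetical sketch of the insertion_point mechanism documented above (not part of the vendored sources): the first response entry declares an @@protoc_insertion_point annotation, and a second generator running in the same protoc invocation targets it by name. The file name "registry.go" and point name "registrations" are illustrative only.

package example

import (
	proto "github.com/golang/protobuf/proto"
	plugin_go "github.com/golang/protobuf/protoc-gen-go/plugin"
)

// insertionPointFiles returns two response entries. The first generator's file
// declares a named insertion point; the second entry, produced by another
// generator run in the same protoc invocation, adds code immediately above the
// line containing that annotation.
func insertionPointFiles() []*plugin_go.CodeGeneratorResponse_File {
	base := &plugin_go.CodeGeneratorResponse_File{
		Name: proto.String("registry.go"),
		Content: proto.String(
			"package registry\n\n// @@protoc_insertion_point(registrations)\n"),
	}
	addition := &plugin_go.CodeGeneratorResponse_File{
		Name:           proto.String("registry.go"),
		InsertionPoint: proto.String("registrations"),
		Content:        proto.String("// added by the second generator\n"),
	}
	return []*plugin_go.CodeGeneratorResponse_File{base, addition}
}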
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding message
+// type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type of
+// the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
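
A short usage sketch for the helpers above (hypothetical code, not part of the vendored sources), packing a well-known Duration message into an Any and unpacking it both into a concrete type and via DynamicAny:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a google.protobuf.Duration message into an Any.
	msg := ptypes.DurationProto(90 * time.Second)
	a, err := ptypes.MarshalAny(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack into a concrete message of the expected type.
	var out durpb.Duration
	if err := ptypes.UnmarshalAny(a, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds) // 90

	// Or let DynamicAny allocate the right type from the type URL.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}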
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..78ee523
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package any
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Any) Reset()         { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()    {}
+func (*Any) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+	return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+	xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
+
+func (m *Any) GetTypeUrl() string {
+	if m != nil {
+		return m.TypeUrl
+	}
+	return ""
+}
+
+func (m *Any) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..4932942
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,154 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name that uniquely identifies the type of the serialized
+  // protocol buffer message. The last segment of the URL's path must represent
+  // the fully qualified name of the type (as in
+  // `path/google.protobuf.Duration`). The name should be in a canonical form
+  // (e.g., leading "." is not accepted).
+  //
+  // In practice, teams usually precompile into the binary all types that they
+  // expect it to use in the context of Any. However, for URLs which use the
+  // scheme `http`, `https`, or no scheme, one can optionally set up a type
+  // server that maps type URLs to message definitions as follows:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Note: this functionality is not currently available in the official
+  // protobuf release, and it is not used for type URLs beginning with
+  // type.googleapis.com.
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..26d1ca2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is only about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos) * time.Nanosecond
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
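
A usage sketch for these conversions (hypothetical code, not part of the vendored sources), including the case where a durpb.Duration is valid by the proto definition yet still too large for time.Duration:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration and back.
	p := ptypes.DurationProto(1500 * time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 1 500000000

	d, err := ptypes.Duration(p)
	fmt.Println(d, err) // 1.5s <nil>

	// A durpb.Duration may lie within the proto range (about +-10,000 years)
	// and still not fit into a time.Duration, which spans roughly +-292 years.
	big := &durpb.Duration{Seconds: 10000000000} // about 317 years
	if _, err := ptypes.Duration(big); err != nil {
		fmt.Println(err) // out of range for time.Duration
	}
}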
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..0d681ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package duration
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+}
+
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *Duration) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+	// 190 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
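As a quick illustration of the seconds/nanos split documented above, here is a minimal Go sketch (not part of the vendored files) that maps a time.Duration onto the generated Duration message; the helper name toDurationProto is hypothetical, and the vendored ptypes package ships its own conversion helpers.

    package main

    import (
    	"fmt"
    	"time"

    	durpb "github.com/golang/protobuf/ptypes/duration"
    )

    // toDurationProto splits a time.Duration into whole seconds plus a
    // nanosecond remainder that carries the same sign as the seconds,
    // matching the contract documented on the Duration message.
    func toDurationProto(d time.Duration) *durpb.Duration {
    	nanos := d.Nanoseconds()
    	secs := nanos / 1000000000
    	nanos -= secs * 1000000000
    	return &durpb.Duration{Seconds: secs, Nanos: int32(nanos)}
    }

    func main() {
    	// Prints roughly: seconds:-1 nanos:-500000000
    	fmt.Println(toDurationProto(-1500 * time.Millisecond))
    }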
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..b4eb03e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package empty
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_900544acb223d5b8, []int{0}
+}
+
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+
+var fileDescriptor_900544acb223d5b8 = []byte{
+	// 148 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+	0xb7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..33daa73
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+	0: "NULL_VALUE",
+}
+
+var NullValue_value = map[string]int32{
+	"NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+	return proto.EnumName(NullValue_name, int32(x))
+}
+
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+	// Unordered map of dynamically typed values.
+	Fields               map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Struct) Reset()         { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage()    {}
+func (*Struct) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+	return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+	xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	// The kind of value.
+	//
+	// Types that are valid to be assigned to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind                 isValue_Kind `protobuf_oneof:"kind"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *Value) Reset()         { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()    {}
+func (*Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{1}
+}
+
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+	return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+	if x, ok := m.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+	if x, ok := m.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (m *Value) GetStringValue() string {
+	if x, ok := m.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+	if x, ok := m.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+	if x, ok := m.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+	if x, ok := m.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+	// Repeated field of dynamically typed values.
+	Values               []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListValue) Reset()         { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage()    {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{2}
+}
+
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+	return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+	proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+	0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+	0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+	0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+	0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+	0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+	0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+	0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+	0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+	0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+	0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+	0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+	0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+	0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+	0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+	0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+	0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+	0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+	0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+	0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+	0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+	0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+	0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
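To make the mapping above concrete, a minimal Go sketch (not part of the vendored files) that builds the JSON-like object {"name": "gopher", "legs": 2} from the generated structpb types could look like this:

    package main

    import (
    	"fmt"

    	structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
    	// Each field wraps its value in the oneof "kind" shown in struct.pb.go.
    	s := &structpb.Struct{
    		Fields: map[string]*structpb.Value{
    			"name": {Kind: &structpb.Value_StringValue{StringValue: "gopher"}},
    			"legs": {Kind: &structpb.Value_NumberValue{NumberValue: 2}},
    		},
    	}
    	fmt.Println(s.GetFields()["name"].GetStringValue()) // gopher
    }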
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8da0df0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
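For reference, a minimal sketch (not part of the vendored file) that exercises the helpers defined above, round-tripping a time.Time through the generated Timestamp message:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/golang/protobuf/ptypes"
    )

    func main() {
    	// time.Time -> google.protobuf.Timestamp
    	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

    	// google.protobuf.Timestamp -> time.Time
    	t, err := ptypes.Timestamp(ts)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(t.UTC()) // 2017-01-15 01:30:15.01 +0000 UTC
    }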
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..31cd846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from  RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Timestamp) Reset()         { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()    {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..eafb3fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from  RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..add19a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	// The double value.
+	Value                float64  `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DoubleValue) Reset()         { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage()    {}
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{0}
+}
+
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
+}
+func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
+}
+func (m *DoubleValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleValue.Merge(m, src)
+}
+func (m *DoubleValue) XXX_Size() int {
+	return xxx_messageInfo_DoubleValue.Size(m)
+}
+func (m *DoubleValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_DoubleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
+
+func (m *DoubleValue) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	// The float value.
+	Value                float32  `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FloatValue) Reset()         { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage()    {}
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{1}
+}
+
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FloatValue.Unmarshal(m, b)
+}
+func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
+}
+func (m *FloatValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FloatValue.Merge(m, src)
+}
+func (m *FloatValue) XXX_Size() int {
+	return xxx_messageInfo_FloatValue.Size(m)
+}
+func (m *FloatValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_FloatValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FloatValue proto.InternalMessageInfo
+
+func (m *FloatValue) GetValue() float32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	// The int64 value.
+	Value                int64    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int64Value) Reset()         { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage()    {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{2}
+}
+
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int64Value.Unmarshal(m, b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+}
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int64Value.Merge(m, src)
+}
+func (m *Int64Value) XXX_Size() int {
+	return xxx_messageInfo_Int64Value.Size(m)
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value                uint64   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt64Value) Reset()         { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage()    {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{3}
+}
+
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+}
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt64Value.Merge(m, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+	return xxx_messageInfo_UInt64Value.Size(m)
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value                int32    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int32Value) Reset()         { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage()    {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{4}
+}
+
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int32Value.Unmarshal(m, b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+}
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int32Value.Merge(m, src)
+}
+func (m *Int32Value) XXX_Size() int {
+	return xxx_messageInfo_Int32Value.Size(m)
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value                uint32   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt32Value) Reset()         { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage()    {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{5}
+}
+
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+}
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt32Value.Merge(m, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+	return xxx_messageInfo_UInt32Value.Size(m)
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	// The bool value.
+	Value                bool     `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BoolValue) Reset()         { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage()    {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{6}
+}
+
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BoolValue.Unmarshal(m, b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+}
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoolValue.Merge(m, src)
+}
+func (m *BoolValue) XXX_Size() int {
+	return xxx_messageInfo_BoolValue.Size(m)
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+	if m != nil {
+		return m.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	// The string value.
+	Value                string   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StringValue) Reset()         { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage()    {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{7}
+}
+
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StringValue.Unmarshal(m, b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+}
+func (m *StringValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StringValue.Merge(m, src)
+}
+func (m *StringValue) XXX_Size() int {
+	return xxx_messageInfo_StringValue.Size(m)
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value                []byte   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BytesValue) Reset()         { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage()    {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{8}
+}
+
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BytesValue.Unmarshal(m, b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+}
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesValue.Merge(m, src)
+}
+func (m *BytesValue) XXX_Size() int {
+	return xxx_messageInfo_BytesValue.Size(m)
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+	// 259 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+	0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+	0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+	0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+	0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+	0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+	0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+	0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..7e215f2
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,553 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// reflect.DeepEqual for comparing whether two values are semantically equal.
+//
+// The primary features of cmp are:
+//
+// • When the default behavior of equality does not suit the needs of the test,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as they
+// are within some tolerance of each other.
+//
+// • Types that have an Equal method may use that method to determine equality.
+// This allows package authors to determine the equality operation for the types
+// that they define.
+//
+// • If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on both
+// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
+// fields are not compared by default; they result in panics unless suppressed
+// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
+// using the AllowUnexported option.
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/function"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
+// the reflection package's inability to retrieve such entries. Equal will panic
+// anytime it comes across a NaN key, but this behavior may change.
+//
+// See https://golang.org/issue/11104 for more details.
+
+var nothing = reflect.Value{}
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// • If two values are not of the same type, then they are never equal
+// and the overall result is false.
+//
+// • Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is greater than one,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform the current
+// values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// • If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil.
+// Otherwise, no such method exists and evaluation proceeds to the next rule.
+//
+// • Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings, and
+// channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+// Pointers are equal if the underlying values they point to are also equal.
+// Interfaces are equal if their underlying concrete values are also equal.
+//
+// Structs are equal if all of their fields are equal. If a struct contains
+// unexported fields, Equal panics unless the AllowUnexported option is used or
+// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+//
+// Arrays, slices, and maps are equal if they are both nil or both non-nil
+// with the same length and the elements at each index or key are equal.
+// Note that a non-nil empty slice and a nil slice are not equal.
+// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+func Equal(x, y interface{}, opts ...Option) bool {
+	s := newState(opts)
+	s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values.
+// It returns an empty string if and only if Equal returns true for the same
+// input values and options. The output string will use the "-" symbol to
+// indicate elements removed from x, and the "+" symbol to indicate elements
+// added to y.
+//
+// Do not depend on this output being stable.
+func Diff(x, y interface{}, opts ...Option) string {
+	r := new(defaultReporter)
+	opts = Options{Options(opts), r}
+	eq := Equal(x, y, opts...)
+	d := r.String()
+	if (d == "") != eq {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
+type state struct {
+	// These fields represent the "comparison state".
+	// Calling statelessCompare must not result in observable changes to these.
+	result   diff.Result // The current result of comparison
+	curPath  Path        // The current path in the value tree
+	reporter reporter    // Optional reporter used for difference formatting
+
+	// dynChecker triggers pseudo-random checks for option correctness.
+	// It is safe for statelessCompare to mutate this value.
+	dynChecker dynChecker
+
+	// These fields, once set by processOption, will not change.
+	exporters map[reflect.Type]bool // Set of structs with unexported field visibility
+	opts      Options               // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+	s := new(state)
+	for _, opt := range opts {
+		s.processOption(opt)
+	}
+	return s
+}
+
+func (s *state) processOption(opt Option) {
+	switch opt := opt.(type) {
+	case nil:
+	case Options:
+		for _, o := range opt {
+			s.processOption(o)
+		}
+	case coreOption:
+		type filtered interface {
+			isFiltered() bool
+		}
+		if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+			panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+		}
+		s.opts = append(s.opts, opt)
+	case visibleStructs:
+		if s.exporters == nil {
+			s.exporters = make(map[reflect.Type]bool)
+		}
+		for t := range opt {
+			s.exporters[t] = true
+		}
+	case reporter:
+		if s.reporter != nil {
+			panic("difference reporter already registered")
+		}
+		s.reporter = opt
+	default:
+		panic(fmt.Sprintf("unknown option %T", opt))
+	}
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+	// We do not save and restore the curPath because all of the compareX
+	// methods should properly push and pop from the path.
+	// It is an implementation bug if the contents of curPath differs from
+	// when calling this function to when returning from it.
+
+	oldResult, oldReporter := s.result, s.reporter
+	s.result = diff.Result{} // Reset result
+	s.reporter = nil         // Remove reporter to avoid spurious printouts
+	s.compareAny(vx, vy)
+	res := s.result
+	s.result, s.reporter = oldResult, oldReporter
+	return res
+}
+
+func (s *state) compareAny(vx, vy reflect.Value) {
+	// TODO: Support cyclic data structures.
+
+	// Rule 0: Differing types are never equal.
+	if !vx.IsValid() || !vy.IsValid() {
+		s.report(vx.IsValid() == vy.IsValid(), vx, vy)
+		return
+	}
+	if vx.Type() != vy.Type() {
+		s.report(false, vx, vy) // Possible for path to be empty
+		return
+	}
+	t := vx.Type()
+	if len(s.curPath) == 0 {
+		s.curPath.push(&pathStep{typ: t})
+		defer s.curPath.pop()
+	}
+	vx, vy = s.tryExporting(vx, vy)
+
+	// Rule 1: Check whether an option applies on this node in the value tree.
+	if s.tryOptions(vx, vy, t) {
+		return
+	}
+
+	// Rule 2: Check whether the type has a valid Equal method.
+	if s.tryMethod(vx, vy, t) {
+		return
+	}
+
+	// Rule 3: Recursively descend into each value's underlying kind.
+	switch t.Kind() {
+	case reflect.Bool:
+		s.report(vx.Bool() == vy.Bool(), vx, vy)
+		return
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		s.report(vx.Int() == vy.Int(), vx, vy)
+		return
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		s.report(vx.Uint() == vy.Uint(), vx, vy)
+		return
+	case reflect.Float32, reflect.Float64:
+		s.report(vx.Float() == vy.Float(), vx, vy)
+		return
+	case reflect.Complex64, reflect.Complex128:
+		s.report(vx.Complex() == vy.Complex(), vx, vy)
+		return
+	case reflect.String:
+		s.report(vx.String() == vy.String(), vx, vy)
+		return
+	case reflect.Chan, reflect.UnsafePointer:
+		s.report(vx.Pointer() == vy.Pointer(), vx, vy)
+		return
+	case reflect.Func:
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	case reflect.Ptr:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		s.curPath.push(&indirect{pathStep{t.Elem()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Interface:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		if vx.Elem().Type() != vy.Elem().Type() {
+			s.report(false, vx.Elem(), vy.Elem())
+			return
+		}
+		s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Slice:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		fallthrough
+	case reflect.Array:
+		s.compareArray(vx, vy, t)
+		return
+	case reflect.Map:
+		s.compareMap(vx, vy, t)
+		return
+	case reflect.Struct:
+		s.compareStruct(vx, vy, t)
+		return
+	default:
+		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+	}
+}
+
+func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
+	if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
+		if sf.force {
+			// Use unsafe pointer arithmetic to get read-write access to an
+			// unexported field in the struct.
+			vx = unsafeRetrieveField(sf.pvx, sf.field)
+			vy = unsafeRetrieveField(sf.pvy, sf.field)
+		} else {
+			// We are not allowed to export the values, so invalidate them
+			// so that tryOptions can panic later if not explicitly ignored.
+			vx = nothing
+			vy = nothing
+		}
+	}
+	return vx, vy
+}
+
+func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
+	// If there were no FilterValues, we will not detect invalid inputs,
+	// so manually check for them and append invalid if necessary.
+	// We still evaluate the options since an ignore can override invalid.
+	opts := s.opts
+	if !vx.IsValid() || !vy.IsValid() {
+		opts = Options{opts, invalid{}}
+	}
+
+	// Evaluate all filters and apply the remaining options.
+	if opt := opts.filter(s, vx, vy, t); opt != nil {
+		opt.apply(s, vx, vy)
+		return true
+	}
+	return false
+}
+
+func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+	// Check if this type even has an Equal method.
+	m, ok := t.MethodByName("Equal")
+	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+		return false
+	}
+
+	eq := s.callTTBFunc(m.Func, vx, vy)
+	s.report(eq, vx, vy)
+	return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
+	v = sanitizeValue(v, f.Type().In(0))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{v})[0]
+	}
+
+	// Run the function twice and ensure that we get the same results back.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, v)
+	want := f.Call([]reflect.Value{v})[0]
+	if got := <-c; !s.statelessCompare(got, want).Equal() {
+		// To avoid false-positives with non-reflexive equality operations,
+		// we sanity check whether a value is equal to itself.
+		if !s.statelessCompare(want, want).Equal() {
+			return want
+		}
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	x = sanitizeValue(x, f.Type().In(0))
+	y = sanitizeValue(y, f.Type().In(1))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if got := <-c; !got.IsValid() || got.Bool() != want {
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+// sanitizeValue converts nil interfaces of type T to those of type R,
+// assuming that T is assignable to R.
+// Otherwise, it returns the input value as is.
+func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
+	// TODO(dsnet): Remove this hacky workaround.
+	// See https://golang.org/issue/22143
+	if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+		return reflect.New(t).Elem()
+	}
+	return v
+}
+
+func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
+	step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
+	s.curPath.push(step)
+
+	// Compute an edit-script for slices vx and vy.
+	es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+		step.xkey, step.ykey = ix, iy
+		return s.statelessCompare(vx.Index(ix), vy.Index(iy))
+	})
+
+	// Report the entire slice as is if the arrays are of primitive kind,
+	// and the arrays are different enough.
+	isPrimitive := false
+	switch t.Elem().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		isPrimitive = true
+	}
+	if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
+		s.curPath.pop() // Pop first since we are reporting the whole slice
+		s.report(false, vx, vy)
+		return
+	}
+
+	// Replay the edit-script.
+	var ix, iy int
+	for _, e := range es {
+		switch e {
+		case diff.UniqueX:
+			step.xkey, step.ykey = ix, -1
+			s.report(false, vx.Index(ix), nothing)
+			ix++
+		case diff.UniqueY:
+			step.xkey, step.ykey = -1, iy
+			s.report(false, nothing, vy.Index(iy))
+			iy++
+		default:
+			step.xkey, step.ykey = ix, iy
+			if e == diff.Identity {
+				s.report(true, vx.Index(ix), vy.Index(iy))
+			} else {
+				s.compareAny(vx.Index(ix), vy.Index(iy))
+			}
+			ix++
+			iy++
+		}
+	}
+	s.curPath.pop()
+	return
+}
+
+func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	}
+
+	// We combine and sort the two map keys so that we can perform the
+	// comparisons in a deterministic order.
+	step := &mapIndex{pathStep: pathStep{t.Elem()}}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.key = k
+		vvx := vx.MapIndex(k)
+		vvy := vy.MapIndex(k)
+		switch {
+		case vvx.IsValid() && vvy.IsValid():
+			s.compareAny(vvx, vvy)
+		case vvx.IsValid() && !vvy.IsValid():
+			s.report(false, vvx, nothing)
+		case !vvx.IsValid() && vvy.IsValid():
+			s.report(false, nothing, vvy)
+		default:
+			// It is possible for both vvx and vvy to be invalid if the
+			// key contained a NaN value in it. There is no way in
+			// reflection to be able to retrieve these values.
+			// See https://golang.org/issue/11104
+			panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+		}
+	}
+}
+
+func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	step := &structField{}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for i := 0; i < t.NumField(); i++ {
+		vvx := vx.Field(i)
+		vvy := vy.Field(i)
+		step.typ = t.Field(i).Type
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For unsafeRetrieveField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			step.force = s.exporters[t]
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(vvx, vvy)
+	}
+}
+
+// report records the result of a single comparison.
+// It also calls Report if any reporter is registered.
+func (s *state) report(eq bool, vx, vy reflect.Value) {
+	if eq {
+		s.result.NSame++
+	} else {
+		s.result.NDiff++
+	}
+	if s.reporter != nil {
+		s.reporter.Report(vx, vy, eq, s.curPath)
+	}
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..42afa49
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,17 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+	return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..fd9f7f1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,122 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build debug
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+//	go test -tags=debug -v
+//
+// Example output:
+//	=== RUN   TestDifference/#34
+//	┌───────────────────────────────┐
+//	│ \ · · · · · · · · · · · · · · │
+//	│ · # · · · · · · · · · · · · · │
+//	│ · \ · · · · · · · · · · · · · │
+//	│ · · \ · · · · · · · · · · · · │
+//	│ · · · X # · · · · · · · · · · │
+//	│ · · · # \ · · · · · · · · · · │
+//	│ · · · · · # # · · · · · · · · │
+//	│ · · · · · # \ · · · · · · · · │
+//	│ · · · · · · · \ · · · · · · · │
+//	│ · · · · · · · · \ · · · · · · │
+//	│ · · · · · · · · · \ · · · · · │
+//	│ · · · · · · · · · · \ · · # · │
+//	│ · · · · · · · · · · · \ # # · │
+//	│ · · · · · · · · · · · # # # · │
+//	│ · · · · · · · · · · # # # # · │
+//	│ · · · · · · · · · # # # # # · │
+//	│ · · · · · · · · · · · · · · \ │
+//	└───────────────────────────────┘
+//	[.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+	updateDelay  = 100 * time.Millisecond
+	finishDelay  = 500 * time.Millisecond
+	ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+	sync.Mutex
+	p1, p2           EditScript
+	fwdPath, revPath *EditScript
+	grid             []byte
+	lines            int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+	dbg.Lock()
+	dbg.fwdPath, dbg.revPath = p1, p2
+	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+	row := "│ " + strings.Repeat("· ", nx) + "│\n"
+	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+	dbg.lines = strings.Count(dbg.String(), "\n")
+	fmt.Print(dbg)
+
+	// Wrap the EqualFunc so that we can intercept each result.
+	return func(ix, iy int) (r Result) {
+		cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+		for i := range cell {
+			cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+		}
+		switch r = f(ix, iy); {
+		case r.Equal():
+			cell[0] = '\\'
+		case r.Similar():
+			cell[0] = 'X'
+		default:
+			cell[0] = '#'
+		}
+		return
+	}
+}
+
+func (dbg *debugger) Update() {
+	dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+	dbg.print(finishDelay)
+	dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+	dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+	for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+		dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+	}
+	return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+	if ansiTerminal {
+		fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+	}
+	fmt.Print(dbg)
+	time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..260befe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,363 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+	// Identity indicates that a symbol pair is identical in both list X and Y.
+	Identity EditType = iota
+	// UniqueX indicates that a symbol only exists in X and not Y.
+	UniqueX
+	// UniqueY indicates that a symbol only exists in Y and not X.
+	UniqueY
+	// Modified indicates that a symbol pair is a modification of each other.
+	Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+	b := make([]byte, len(es))
+	for i, e := range es {
+		switch e {
+		case Identity:
+			b[i] = '.'
+		case UniqueX:
+			b[i] = 'X'
+		case UniqueY:
+			b[i] = 'Y'
+		case Modified:
+			b[i] = 'M'
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+	for _, e := range es {
+		switch e {
+		case Identity:
+			s.NI++
+		case UniqueX:
+			s.NX++
+		case UniqueY:
+			s.NY++
+		case Modified:
+			s.NM++
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NSame is the number of sub-elements that are equal.
+// NDiff is the number of sub-elements that are not equal.
+type Result struct{ NSame, NDiff int }
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NSame to NDiff to determine similarity may change.
+func (r Result) Similar() bool {
+	// Use NSame+1 to offset NSame so that binary comparisons are similar.
+	return r.NSame+1 >= r.NDiff
+}
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+//	• eq == (es.Dist()==0)
+//	• nx == es.LenX()
+//	• ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	• 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	• 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	• fwdFrontier.X < revFrontier.X
+	//	• fwdFrontier.Y < revFrontier.Y
+	// Unless, it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	• Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	• As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	• When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	• This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points ever intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists commonly differ because elements were added to the front
+	// or end of the other list.
+	//
+	// Running the tests with the "debug" build tag prints a visualization of
+	// the algorithm running in real-time. This is educational for understanding
+	// how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point.
+		if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+			revFrontier.X--
+		} else {
+			revFrontier.Y--
+		}
+	}
+
+	// Join the forward and reverse paths and then append the reverse path.
+	fwdPath.connect(revPath.point, f)
+	for i := len(revPath.es) - 1; i >= 0; i-- {
+		t := revPath.es[i]
+		revPath.es = revPath.es[:i]
+		fwdPath.append(t)
+	}
+	debug.Finish()
+	return fwdPath.es
+}
+
+type path struct {
+	dir   int // +1 if forward, -1 if reverse
+	point     // Leading point of the EditScript path
+	es    EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+	if p.dir > 0 {
+		// Connect in forward direction.
+		for dst.X > p.X && dst.Y > p.Y {
+			switch r := f(p.X, p.Y); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case dst.X-p.X >= dst.Y-p.Y:
+				p.append(UniqueX)
+			default:
+				p.append(UniqueY)
+			}
+		}
+		for dst.X > p.X {
+			p.append(UniqueX)
+		}
+		for dst.Y > p.Y {
+			p.append(UniqueY)
+		}
+	} else {
+		// Connect in reverse direction.
+		for p.X > dst.X && p.Y > dst.Y {
+			switch r := f(p.X-1, p.Y-1); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case p.Y-dst.Y >= p.X-dst.X:
+				p.append(UniqueY)
+			default:
+				p.append(UniqueX)
+			}
+		}
+		for p.X > dst.X {
+			p.append(UniqueX)
+		}
+		for p.Y > dst.Y {
+			p.append(UniqueY)
+		}
+	}
+}
+
+func (p *path) append(t EditType) {
+	p.es = append(p.es, t)
+	switch t {
+	case Identity, Modified:
+		p.add(p.dir, p.dir)
+	case UniqueX:
+		p.add(p.dir, 0)
+	case UniqueY:
+		p.add(0, p.dir)
+	}
+	debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+	if x&1 != 0 {
+		x = ^x
+	}
+	return x >> 1
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..4c35ff1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,49 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package function identifies function types.
+package function
+
+import "reflect"
+
+type funcType int
+
+const (
+	_ funcType = iota
+
+	ttbFunc // func(T, T) bool
+	tibFunc // func(T, I) bool
+	trFunc  // func(T) R
+
+	Equal           = ttbFunc // func(T, T) bool
+	EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+	Transformer     = trFunc  // func(T) R
+	ValueFilter     = ttbFunc // func(T, T) bool
+	Less            = ttbFunc // func(T, T) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+		return false
+	}
+	ni, no := t.NumIn(), t.NumOut()
+	switch ft {
+	case ttbFunc: // func(T, T) bool
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+			return true
+		}
+	case tibFunc: // func(T, I) bool
+		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+			return true
+		}
+	case trFunc: // func(T) R
+		if ni == 1 && no == 1 {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
new file mode 100644
index 0000000..657e508
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
@@ -0,0 +1,277 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package value provides functionality for reflect.Value types.
+package value
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+// Format formats the value v as a string.
+//
+// This is similar to fmt.Sprintf("%+v", v) except this:
+//	* Prints the type unless it can be elided
+//	* Avoids printing struct fields that are zero
+//	* Prints a nil-slice as being nil, not empty
+//	* Prints map entries in deterministic order
+func Format(v reflect.Value, conf FormatConfig) string {
+	conf.printType = true
+	conf.followPointers = true
+	conf.realPointers = true
+	return formatAny(v, conf, nil)
+}
+
+type FormatConfig struct {
+	UseStringer        bool // Should the String method be used if available?
+	printType          bool // Should we print the type before the value?
+	PrintPrimitiveType bool // Should we print the type of primitives?
+	followPointers     bool // Should we recursively follow pointers?
+	realPointers       bool // Should we print the real address of pointers?
+}
+
+func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
+	// TODO: Should this be a multi-line printout in certain situations?
+
+	if !v.IsValid() {
+		return "<non-existent>"
+	}
+	if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
+		if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
+			return "<nil>"
+		}
+
+		const stringerPrefix = "s" // Indicates that the String method was used
+		s := v.Interface().(fmt.Stringer).String()
+		return stringerPrefix + formatString(s)
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		return formatPrimitive(v.Type(), v.Bool(), conf)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return formatPrimitive(v.Type(), v.Int(), conf)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
+			// Unnamed uints are usually bytes or words, so use hexadecimal.
+			return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
+		}
+		return formatPrimitive(v.Type(), v.Uint(), conf)
+	case reflect.Float32, reflect.Float64:
+		return formatPrimitive(v.Type(), v.Float(), conf)
+	case reflect.Complex64, reflect.Complex128:
+		return formatPrimitive(v.Type(), v.Complex(), conf)
+	case reflect.String:
+		return formatPrimitive(v.Type(), formatString(v.String()), conf)
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		return formatPointer(v, conf)
+	case reflect.Ptr:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("(%v)(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] || !conf.followPointers {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		return "&" + formatAny(v.Elem(), conf, visited)
+	case reflect.Interface:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		return formatAny(v.Elem(), conf, visited)
+	case reflect.Slice:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		fallthrough
+	case reflect.Array:
+		var ss []string
+		subConf := conf
+		subConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for i := 0; i < v.Len(); i++ {
+			s := formatAny(v.Index(i), subConf, visited)
+			ss = append(ss, s)
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Map:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+
+		var ss []string
+		keyConf, valConf := conf, conf
+		keyConf.printType = v.Type().Key().Kind() == reflect.Interface
+		keyConf.followPointers = false
+		valConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for _, k := range SortKeys(v.MapKeys()) {
+			sk := formatAny(k, keyConf, visited)
+			sv := formatAny(v.MapIndex(k), valConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Struct:
+		var ss []string
+		subConf := conf
+		subConf.printType = true
+		for i := 0; i < v.NumField(); i++ {
+			vv := v.Field(i)
+			if isZero(vv) {
+				continue // Elide zero value fields
+			}
+			name := v.Type().Field(i).Name
+			subConf.UseStringer = conf.UseStringer
+			s := formatAny(vv, subConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", name, s))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	default:
+		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+	}
+}
+
+func formatString(s string) string {
+	// Use quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !unicode.IsPrint(r)
+	}
+	if strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
+
+func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
+	if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
+		return fmt.Sprintf("%v(%v)", t, v)
+	}
+	return fmt.Sprintf("%v", v)
+}
+
+func formatPointer(v reflect.Value, conf FormatConfig) string {
+	p := v.Pointer()
+	if !conf.realPointers {
+		p = 0 // For deterministic printing purposes
+	}
+	s := formatHex(uint64(p))
+	if conf.printType {
+		return fmt.Sprintf("(%v)(%s)", v.Type(), s)
+	}
+	return s
+}
+
+func formatHex(u uint64) string {
+	var f string
+	switch {
+	case u <= 0xff:
+		f = "0x%02x"
+	case u <= 0xffff:
+		f = "0x%04x"
+	case u <= 0xffffff:
+		f = "0x%06x"
+	case u <= 0xffffffff:
+		f = "0x%08x"
+	case u <= 0xffffffffff:
+		f = "0x%010x"
+	case u <= 0xffffffffffff:
+		f = "0x%012x"
+	case u <= 0xffffffffffffff:
+		f = "0x%014x"
+	case u <= 0xffffffffffffffff:
+		f = "0x%016x"
+	}
+	return fmt.Sprintf(f, u)
+}
+
+// insertPointer inserts p into m, allocating m if necessary.
+func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
+	if m == nil {
+		m = make(map[uintptr]bool)
+	}
+	m[p] = true
+	return m
+}
+
+// isZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool() == false
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.String:
+		return v.String() == ""
+	case reflect.UnsafePointer:
+		return v.Pointer() == 0
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !isZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..fe8aa27
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,111 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+	if len(vs) == 0 {
+		return vs
+	}
+
+	// Sort the map keys.
+	sort.Sort(valueSorter(vs))
+
+	// Deduplicate keys (fails for NaNs).
+	vs2 := vs[:1]
+	for _, v := range vs[1:] {
+		if isLess(vs2[len(vs2)-1], v) {
+			vs2 = append(vs2, v)
+		}
+	}
+	return vs2
+}
+
+// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
+type valueSorter []reflect.Value
+
+func (vs valueSorter) Len() int           { return len(vs) }
+func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
+func (vs valueSorter) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+	switch x.Type().Kind() {
+	case reflect.Bool:
+		return !x.Bool() && y.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return x.Int() < y.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return x.Uint() < y.Uint()
+	case reflect.Float32, reflect.Float64:
+		fx, fy := x.Float(), y.Float()
+		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+	case reflect.Complex64, reflect.Complex128:
+		cx, cy := x.Complex(), y.Complex()
+		rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+		if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+			return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+		}
+		return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+	case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+		return x.Pointer() < y.Pointer()
+	case reflect.String:
+		return x.String() < y.String()
+	case reflect.Array:
+		for i := 0; i < x.Len(); i++ {
+			if isLess(x.Index(i), y.Index(i)) {
+				return true
+			}
+			if isLess(y.Index(i), x.Index(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Struct:
+		for i := 0; i < x.NumField(); i++ {
+			if isLess(x.Field(i), y.Field(i)) {
+				return true
+			}
+			if isLess(y.Field(i), x.Field(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Interface:
+		vx, vy := x.Elem(), y.Elem()
+		if !vx.IsValid() || !vy.IsValid() {
+			return !vx.IsValid() && vy.IsValid()
+		}
+		tx, ty := vx.Type(), vy.Type()
+		if tx == ty {
+			return isLess(x.Elem(), y.Elem())
+		}
+		if tx.Kind() != ty.Kind() {
+			return vx.Kind() < vy.Kind()
+		}
+		if tx.String() != ty.String() {
+			return tx.String() < ty.String()
+		}
+		if tx.PkgPath() != ty.PkgPath() {
+			return tx.PkgPath() < ty.PkgPath()
+		}
+		// This can happen in rare situations, so we fallback to just comparing
+		// the unique pointer for a reflect.Type. This guarantees deterministic
+		// ordering within a program, but it is obviously not stable.
+		return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+	default:
+		// Must be Func, Map, or Slice; which are not comparable.
+		panic(fmt.Sprintf("%v is not comparable", x.Type()))
+	}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..91d4b06
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,453 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of Equal and Diff. In particular,
+// the fundamental Option functions (Ignore, Transformer, and Comparer),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters (FilterPath and
+// FilterValues) to control the scope over which they are applied.
+//
+// The cmp/cmpopts package provides helper functions for creating options that
+// may be used with Equal and Diff.
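+//
+// For example, an illustrative (non-normative) composition that ignores a
+// hypothetical struct field named "Internal" wherever it appears in the
+// value tree:
+//
+//	FilterPath(func(p Path) bool {
+//		sf, ok := p.Last().(StructField)
+//		return ok && sf.Name() == "Internal"
+//	}, Ignore())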
+type Option interface {
+	// filter applies all filters and returns the option that remains.
+	// Each option may only read s.curPath and call s.callTTBFunc.
+	//
+	// An Options is returned only if multiple comparers or transformers
+	// can apply simultaneously and will only contain values of those types
+	// or sub-Options containing values of those types.
+	filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+}
+
+// applicableOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Grouping:    Options
+type applicableOption interface {
+	Option
+
+	// apply executes the option, which may mutate s or panic.
+	apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Filters:     *pathFilter | *valuesFilter
+type coreOption interface {
+	Option
+	isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of Option values that also satisfies the Option interface.
+// Helper comparison packages may return an Options value when packing multiple
+// Option values into a single Option. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
+	for _, opt := range opts {
+		switch opt := opt.filter(s, vx, vy, t); opt.(type) {
+		case ignore:
+			return ignore{} // Only ignore can short-circuit evaluation
+		case invalid:
+			out = invalid{} // Takes precedence over comparer or transformer
+		case *comparer, *transformer, Options:
+			switch out.(type) {
+			case nil:
+				out = opt
+			case invalid:
+				// Keep invalid
+			case *comparer, *transformer, Options:
+				out = Options{out, opt} // Conflicting comparers or transformers
+			}
+		}
+	}
+	return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+	const warning = "ambiguous set of applicable options"
+	const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+	var ss []string
+	for _, opt := range flattenOptions(nil, opts) {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	set := strings.Join(ss, "\n\t")
+	panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+	var ss []string
+	for _, opt := range opts {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new Option where opt is only evaluated if filter f
+// returns true for the current Path in the value tree.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
+func FilterPath(f func(Path) bool, opt Option) Option {
+	if f == nil {
+		panic("invalid path filter function")
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		return &pathFilter{fnc: f, opt: opt}
+	}
+	return nil
+}
+
+type pathFilter struct {
+	core
+	fnc func(Path) bool
+	opt Option
+}
+
+func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if f.fnc(s.curPath) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f pathFilter) String() string {
+	fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
+	return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
+}
+
+// FilterValues returns a new Option where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If the type of the values is not
+// assignable to T, then this filter implicitly returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
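+//
+// For example, an illustrative sketch (the tolerance is an arbitrary choice)
+// that applies an approximate comparison only to pairs of non-NaN float64s:
+//
+//	FilterValues(func(x, y float64) bool {
+//		return !math.IsNaN(x) && !math.IsNaN(y)
+//	}, Comparer(func(x, y float64) bool {
+//		return math.Abs(x-y) < 1e-9
+//	}))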
+func FilterValues(f interface{}, opt Option) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+		panic(fmt.Sprintf("invalid values filter function: %T", f))
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		vf := &valuesFilter{fnc: v, opt: opt}
+		if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+			vf.typ = ti
+		}
+		return vf
+	}
+	return nil
+}
+
+type valuesFilter struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+	opt Option
+}
+
+func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if !vx.IsValid() || !vy.IsValid() {
+		return invalid{}
+	}
+	if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f valuesFilter) String() string {
+	fn := getFuncName(f.fnc.Pointer())
+	return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
+}
+
+// Ignore is an Option that causes all comparisons to be ignored.
+// This value is intended to be combined with FilterPath or FilterValues.
+// It is an error to pass an unfiltered Ignore option to Equal.
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool                                                     { return false }
+func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
+func (ignore) apply(_ *state, _, _ reflect.Value)                                   { return }
+func (ignore) String() string                                                       { return "Ignore()" }
+
+// invalid is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields.
+type invalid struct{ core }
+
+func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
+func (invalid) apply(s *state, _, _ reflect.Value) {
+	const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
+	panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+}
+
+// Transformer returns an Option that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the Path since the last non-Transform step.
+//
+// The name is a user provided label that is used as the Transform.Name in the
+// transformation PathStep. If empty, an arbitrary name is used.
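+//
+// For example, an illustrative sketch (the "Sort" label is arbitrary) that
+// compares []int values irrespective of element order:
+//
+//	Transformer("Sort", func(in []int) []int {
+//		out := append([]int(nil), in...) // copy so the input is never mutated
+//		sort.Ints(out)
+//		return out
+//	})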
+func Transformer(name string, f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+		panic(fmt.Sprintf("invalid transformer function: %T", f))
+	}
+	if name == "" {
+		name = "λ" // Lambda-symbol as place-holder for anonymous transformer
+	}
+	if !isValid(name) {
+		panic(fmt.Sprintf("invalid name: %q", name))
+	}
+	tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		tr.typ = ti
+	}
+	return tr
+}
+
+type transformer struct {
+	core
+	name string
+	typ  reflect.Type  // T
+	fnc  reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	for i := len(s.curPath) - 1; i >= 0; i-- {
+		if t, ok := s.curPath[i].(*transform); !ok {
+			break // Hit most recent non-Transform step
+		} else if tr == t.trans {
+			return nil // Cannot directly use same Transform
+		}
+	}
+	if tr.typ == nil || t.AssignableTo(tr.typ) {
+		return tr
+	}
+	return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+	// Update path before calling the Transformer so that dynamic checks
+	// will use the updated path.
+	s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
+	defer s.curPath.pop()
+
+	vx = s.callTRFunc(tr.fnc, vx)
+	vy = s.callTRFunc(tr.fnc, vy)
+	s.compareAny(vx, vy)
+}
+
+func (tr transformer) String() string {
+	return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
+}
+
+// Comparer returns an Option that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+//	• Symmetric: equal(x, y) == equal(y, x)
+//	• Deterministic: equal(x, y) == equal(x, y)
+//	• Pure: equal(x, y) does not modify x or y
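+//
+// For example, an illustrative sketch of approximate float64 equality
+// (the tolerance is an arbitrary choice):
+//
+//	Comparer(func(x, y float64) bool {
+//		return math.Abs(x-y) < 1e-9
+//	})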
+func Comparer(f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+		panic(fmt.Sprintf("invalid comparer function: %T", f))
+	}
+	cm := &comparer{fnc: v}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		cm.typ = ti
+	}
+	return cm
+}
+
+type comparer struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	if cm.typ == nil || t.AssignableTo(cm.typ) {
+		return cm
+	}
+	return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+	eq := s.callTTBFunc(cm.fnc, vx, vy)
+	s.report(eq, vx, vy)
+}
+
+func (cm comparer) String() string {
+	return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
+}
+
+// AllowUnexported returns an Option that forcibly allows operations on
+// unexported fields in certain structs, which are specified by passing in a
+// value of each struct type.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of Equal
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// For some cases, a custom Comparer should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the reflect.Type documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *regexp.Regexp types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using Comparers:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
+// all unexported fields on specified struct types.
+func AllowUnexported(types ...interface{}) Option {
+	if !supportAllowUnexported {
+		panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS")
+	}
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return visibleStructs(m)
+}
+
+type visibleStructs map[reflect.Type]bool
+
+func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
+	panic("not implemented")
+}
+
+// reporter is an Option that configures how differences are reported.
+type reporter interface {
+	// TODO: Not exported yet.
+	//
+	// Perhaps add PushStep and PopStep and change Report to only accept
+	// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
+	// it clear that we are traversing the value tree in a depth-first-search
+	// manner, which has an effect on how values are printed.
+
+	Option
+
+	// Report is called for every comparison made and will be provided with
+	// the two values being compared, the equality result, and the
+	// current path in the value tree. It is possible for x or y to be an
+	// invalid reflect.Value if one of the values is non-existent;
+	// which is possible with maps and slices.
+	Report(x, y reflect.Value, eq bool, p Path)
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+	for _, opt := range src {
+		switch opt := opt.(type) {
+		case nil:
+			continue
+		case Options:
+			dst = flattenOptions(dst, opt)
+		case coreOption:
+			dst = append(dst, opt)
+		default:
+			panic(fmt.Sprintf("invalid option type: %T", opt))
+		}
+	}
+	return dst
+}
+
+// getFuncName returns a short function name from the pointer.
+// The string parsing logic works up until Go1.9.
+func getFuncName(p uintptr) string {
+	fnc := runtime.FuncForPC(p)
+	if fnc == nil {
+		return "<unknown>"
+	}
+	name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
+	if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
+		// Strip the package name from method name.
+		name = strings.TrimSuffix(name, ")-fm")
+		name = strings.TrimSuffix(name, ")·fm")
+		if i := strings.LastIndexByte(name, '('); i >= 0 {
+			methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
+			if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
+				methodName = methodName[j+1:] // E.g., "myfunc"
+			}
+			name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
+		}
+	}
+	if i := strings.LastIndexByte(name, '/'); i >= 0 {
+		// Strip the package name.
+		name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
+	}
+	return name
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c08a3cf
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,309 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type (
+	// Path is a list of PathSteps describing the sequence of operations to get
+	// from some root type to the current position in the value tree.
+	// The first Path element is always an operation-less PathStep that exists
+	// simply to identify the initial type.
+	//
+	// When traversing structs with embedded structs, the embedded struct will
+	// always be accessed as a field before traversing the fields of the
+	// embedded struct themselves. That is, an exported field from the
+	// embedded struct will never be accessed directly from the parent struct.
+	Path []PathStep
+
+	// PathStep is a union-type for specific operations to traverse
+	// a value's tree structure. Users of this package never need to implement
+	// these types as values of this type will be returned by this package.
+	PathStep interface {
+		String() string
+		Type() reflect.Type // Resulting type after performing the path step
+		isPathStep()
+	}
+
+	// SliceIndex is an index operation on a slice or array at some index Key.
+	SliceIndex interface {
+		PathStep
+		Key() int // May return -1 if in a split state
+
+		// SplitKeys returns the indexes for indexing into slices in the
+		// x and y values, respectively. These indexes may differ due to the
+		// insertion or removal of an element in one of the slices, causing
+		// all of the indexes to be shifted. If an index is -1, then that
+		// indicates that the element does not exist in the associated slice.
+		//
+		// Key is guaranteed to return -1 if and only if the indexes returned
+		// by SplitKeys are not the same. SplitKeys will never return -1 for
+		// both indexes.
+		SplitKeys() (x int, y int)
+
+		isSliceIndex()
+	}
+	// MapIndex is an index operation on a map at some index Key.
+	MapIndex interface {
+		PathStep
+		Key() reflect.Value
+		isMapIndex()
+	}
+	// TypeAssertion represents a type assertion on an interface.
+	TypeAssertion interface {
+		PathStep
+		isTypeAssertion()
+	}
+	// StructField represents a struct field access on a field called Name.
+	StructField interface {
+		PathStep
+		Name() string
+		Index() int
+		isStructField()
+	}
+	// Indirect represents pointer indirection on the parent type.
+	Indirect interface {
+		PathStep
+		isIndirect()
+	}
+	// Transform is a transformation from the parent type to the current type.
+	Transform interface {
+		PathStep
+		Name() string
+		Func() reflect.Value
+
+		// Option returns the originally constructed Transformer option.
+		// The == operator can be used to detect the exact option used.
+		Option() Option
+
+		isTransform()
+	}
+)
+
+func (pa *Path) push(s PathStep) {
+	*pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+	*pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last PathStep in the Path.
+// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Last() PathStep {
+	return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Index(i int) PathStep {
+	if i < 0 {
+		i = len(pa) + i
+	}
+	if i < 0 || i >= len(pa) {
+		return pathStep{}
+	}
+	return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//	MyMap.MySlices.MyField
+func (pa Path) String() string {
+	var ss []string
+	for _, s := range pa {
+		if _, ok := s.(*structField); ok {
+			ss = append(ss, s.String())
+		}
+	}
+	return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+	var ssPre, ssPost []string
+	var numIndirect int
+	for i, s := range pa {
+		var nextStep PathStep
+		if i+1 < len(pa) {
+			nextStep = pa[i+1]
+		}
+		switch s := s.(type) {
+		case *indirect:
+			numIndirect++
+			pPre, pPost := "(", ")"
+			switch nextStep.(type) {
+			case *indirect:
+				continue // Next step is indirection, so let them batch up
+			case *structField:
+				numIndirect-- // Automatic indirection on struct fields
+			case nil:
+				pPre, pPost = "", "" // Last step; no need for parenthesis
+			}
+			if numIndirect > 0 {
+				ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+				ssPost = append(ssPost, pPost)
+			}
+			numIndirect = 0
+			continue
+		case *transform:
+			ssPre = append(ssPre, s.trans.name+"(")
+			ssPost = append(ssPost, ")")
+			continue
+		case *typeAssertion:
+			// As a special-case, elide type assertions on anonymous types
+			// since they are typically generated dynamically and can be very
+			// verbose. For example, some transforms return interface{} because
+			// of Go's lack of generics, but typically take in and return the
+			// exact same concrete type.
+			if s.Type().PkgPath() == "" {
+				continue
+			}
+		}
+		ssPost = append(ssPost, s.String())
+	}
+	for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+		ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+	}
+	return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type (
+	pathStep struct {
+		typ reflect.Type
+	}
+
+	sliceIndex struct {
+		pathStep
+		xkey, ykey int
+	}
+	mapIndex struct {
+		pathStep
+		key reflect.Value
+	}
+	typeAssertion struct {
+		pathStep
+	}
+	structField struct {
+		pathStep
+		name string
+		idx  int
+
+		// These fields are used for forcibly accessing an unexported field.
+		// pvx, pvy, and field are only valid if unexported is true.
+		unexported bool
+		force      bool                // Forcibly allow visibility
+		pvx, pvy   reflect.Value       // Parent values
+		field      reflect.StructField // Field information
+	}
+	indirect struct {
+		pathStep
+	}
+	transform struct {
+		pathStep
+		trans *transformer
+	}
+)
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) String() string {
+	if ps.typ == nil {
+		return "<nil>"
+	}
+	s := ps.typ.String()
+	if s == "" || strings.ContainsAny(s, "{}\n") {
+		return "root" // Type too simple or complex to print
+	}
+	return fmt.Sprintf("{%s}", s)
+}
+
+func (si sliceIndex) String() string {
+	switch {
+	case si.xkey == si.ykey:
+		return fmt.Sprintf("[%d]", si.xkey)
+	case si.ykey == -1:
+		// [5->?] means "I don't know where X[5] went"
+		return fmt.Sprintf("[%d->?]", si.xkey)
+	case si.xkey == -1:
+		// [?->3] means "I don't know where Y[3] came from"
+		return fmt.Sprintf("[?->%d]", si.ykey)
+	default:
+		// [5->3] means "X[5] moved to Y[3]"
+		return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+	}
+}
+func (mi mapIndex) String() string      { return fmt.Sprintf("[%#v]", mi.key) }
+func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (sf structField) String() string   { return fmt.Sprintf(".%s", sf.name) }
+func (in indirect) String() string      { return "*" }
+func (tf transform) String() string     { return fmt.Sprintf("%s()", tf.trans.name) }
+
+func (si sliceIndex) Key() int {
+	if si.xkey != si.ykey {
+		return -1
+	}
+	return si.xkey
+}
+func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
+func (mi mapIndex) Key() reflect.Value      { return mi.key }
+func (sf structField) Name() string         { return sf.name }
+func (sf structField) Index() int           { return sf.idx }
+func (tf transform) Name() string           { return tf.trans.name }
+func (tf transform) Func() reflect.Value    { return tf.trans.fnc }
+func (tf transform) Option() Option         { return tf.trans }
+
+func (pathStep) isPathStep()           {}
+func (sliceIndex) isSliceIndex()       {}
+func (mapIndex) isMapIndex()           {}
+func (typeAssertion) isTypeAssertion() {}
+func (structField) isStructField()     {}
+func (indirect) isIndirect()           {}
+func (transform) isTransform()         {}
+
+var (
+	_ SliceIndex    = sliceIndex{}
+	_ MapIndex      = mapIndex{}
+	_ TypeAssertion = typeAssertion{}
+	_ StructField   = structField{}
+	_ Indirect      = indirect{}
+	_ Transform     = transform{}
+
+	_ PathStep = sliceIndex{}
+	_ PathStep = mapIndex{}
+	_ PathStep = typeAssertion{}
+	_ PathStep = structField{}
+	_ PathStep = indirect{}
+	_ PathStep = transform{}
+)
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+	r, _ := utf8.DecodeRuneInString(id)
+	return unicode.IsUpper(r)
+}
+
+// isValid reports whether the identifier is valid.
+// Empty and underscore-only strings are not valid.
+func isValid(id string) bool {
+	ok := id != "" && id != "_"
+	for j, c := range id {
+		ok = ok && (j > 0 || !unicode.IsDigit(c))
+		ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
+	}
+	return ok
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go
new file mode 100644
index 0000000..20e9f18
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/reporter.go
@@ -0,0 +1,53 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+type defaultReporter struct {
+	Option
+	diffs  []string // List of differences, possibly truncated
+	ndiffs int      // Total number of differences
+	nbytes int      // Number of bytes in diffs
+	nlines int      // Number of lines in diffs
+}
+
+var _ reporter = (*defaultReporter)(nil)
+
+func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
+	if eq {
+		return // Ignore equal results
+	}
+	const maxBytes = 4096
+	const maxLines = 256
+	r.ndiffs++
+	if r.nbytes < maxBytes && r.nlines < maxLines {
+		sx := value.Format(x, value.FormatConfig{UseStringer: true})
+		sy := value.Format(y, value.FormatConfig{UseStringer: true})
+		if sx == sy {
+			// Unhelpful output, so use more exact formatting.
+			sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
+			sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
+		}
+		s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
+		r.diffs = append(r.diffs, s)
+		r.nbytes += len(s)
+		r.nlines += strings.Count(s, "\n")
+	}
+}
+
+func (r *defaultReporter) String() string {
+	s := strings.Join(r.diffs, "")
+	if r.ndiffs == len(r.diffs) {
+		return s
+	}
+	return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
new file mode 100644
index 0000000..d1518eb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
@@ -0,0 +1,15 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego appengine js
+
+package cmp
+
+import "reflect"
+
+const supportAllowUnexported = false
+
+func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
+	panic("unsafeRetrieveField is not implemented")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
new file mode 100644
index 0000000..579b655
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
@@ -0,0 +1,23 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego,!appengine,!js
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const supportAllowUnexported = true
+
+// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
+// such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve.
+func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+	return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
+}
diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore
new file mode 100644
index 0000000..149266f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+#*
+*~
+
+# examples binaries
+examples/synscan/synscan
+examples/pfdump/pfdump
+examples/pcapdump/pcapdump
+examples/httpassembly/httpassembly
+examples/statsassembly/statsassembly
+examples/arpscan/arpscan
+examples/bidirectional/bidirectional
+examples/bytediff/bytediff
+examples/reassemblydump/reassemblydump
+layers/gen
+macs/gen
+pcap/pcap_tester
diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh
new file mode 100644
index 0000000..e341a1c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.gofmt.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+if [ -n "$(go fmt ./...)" ]; then
+  echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2
+  exit 1
+fi
diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh
new file mode 100644
index 0000000..0e267f5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.golint.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+
+go get golang.org/x/lint/golint
+DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag"
+# Add subdirectories here as we clean up golint on each.
+for subdir in $DIRS; do
+  pushd $subdir
+  if golint |
+      grep -v CannotSetRFMon |  # pcap exported error name
+      grep -v DataLost |        # tcpassembly/tcpreader exported error name
+      grep .; then
+    exit 1
+  fi
+  popd
+done
+
+pushd layers
+for file in *.go; do
+  if cat .lint_blacklist | grep -q $file; then
+    echo "Skipping lint of $file due to .lint_blacklist"
+  elif golint $file | grep .; then
+    echo "Lint error in file $file"
+    exit 1
+  fi
+done
+popd
diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh
new file mode 100644
index 0000000..a5c1354
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag"
+set -e
+for subdir in $DIRS; do
+  pushd $subdir
+  go vet
+  popd
+done
diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh
new file mode 100644
index 0000000..648c901
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.install.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ev
+
+go get github.com/google/gopacket
+go get github.com/google/gopacket/layers
+go get github.com/google/gopacket/tcpassembly
+go get github.com/google/gopacket/reassembly
+go get github.com/google/gopacket/pcapgo
diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh
new file mode 100644
index 0000000..a483f4f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.script.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ev
+
+go test github.com/google/gopacket
+go test github.com/google/gopacket/layers
+go test github.com/google/gopacket/tcpassembly
+go test github.com/google/gopacket/reassembly
+go test github.com/google/gopacket/pcapgo 
+go test github.com/google/gopacket/pcap
diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml
new file mode 100644
index 0000000..8ebb01d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.yml
@@ -0,0 +1,55 @@
+language: go
+go:
+ - 1.11.x
+ - 1.12.x
+ - master
+
+addons:
+  apt:
+    packages:
+      libpcap-dev
+
+# use modules except for older versions (see below)
+install: true
+
+env:
+  - GO111MODULE=on
+
+script: ./.travis.script.sh
+
+matrix:
+  fast_finish: true
+  allow_failures:
+    - go: master
+
+jobs:
+  include:
+    - go: 1.5.x
+      install: ./.travis.install.sh
+    - go: 1.6.x
+      install: ./.travis.install.sh
+    - go: 1.7.x
+      install: ./.travis.install.sh
+    - go: 1.8.x
+      install: ./.travis.install.sh
+    - go: 1.9.x
+      install: ./.travis.install.sh
+    - go: 1.10.x
+      install: ./.travis.install.sh
+    - os: osx
+      go: 1.x
+    - os: windows
+      go: 1.x
+      # winpcap does not work on travis ci - so install nmap to get libpcap
+      before_install: choco install nmap
+    - stage: style
+      name: "fmt/vet/lint"
+      go: 1.x
+      script:
+        - ./.travis.gofmt.sh
+        - ./.travis.govet.sh
+        - ./.travis.golint.sh
+
+stages:
+  - style
+  - test
diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS
new file mode 100644
index 0000000..8431985
--- /dev/null
+++ b/vendor/github.com/google/gopacket/AUTHORS
@@ -0,0 +1,52 @@
+AUTHORS AND MAINTAINERS:
+
+MAIN DEVELOPERS:
+Graeme Connell   <gconnell@google.com, gsconnell@gmail.com>
+
+AUTHORS:
+Nigel Tao <nigeltao@google.com>
+Cole Mickens <cole.mickens@gmail.com>
+Ben Daglish <bdaglish@restorepoint.com>
+Luis Martinez <martinezlc99@gmail.com>
+Remco Verhoef <remco@dutchcoders.io>
+Hiroaki Kawai <Hiroaki.Kawai@gmail.com>
+Lukas Lueg <lukas.lueg@gmail.com>
+Laurent Hausermann <laurent.hausermann@gmail.com>
+Bill Green <bgreen@newrelic.com>
+Christian Mäder <christian.maeder@nine.ch>
+Gernot Vormayr <gvormayr@gmail.com>
+Vitor Garcia Graveto <victor.graveto@gmail.com>
+Elias Chavarria Reyes <elchavar@cisco.com>
+
+CONTRIBUTORS:
+Attila Oláh <attila@attilaolah.eu>
+Vittus Mikiassen <matt.miki.vimik@gmail.com>
+Matthias Radestock <matthias.radestock@gmail.com>
+Matthew Sackman <matthew@wellquite.org>
+Loic Prylli <loicp@google.com>
+Alexandre Fiori <fiorix@gmail.com>
+Adrian Tam <adrian.c.m.tam@gmail.com>
+Satoshi Matsumoto <kaorimatz@gmail.com>
+David Stainton <dstainton415@gmail.com>
+Jesse Ward <jesse@jesseward.com>
+Kane Mathers <kane@kanemathers.name>
+Jose Selvi <jselvi@pentester.es>
+Yerden Zhumabekov <yerden.zhumabekov@gmail.com>
+
+-----------------------------------------------
+FORKED FROM github.com/akrennmair/gopcap
+ALL THE FOLLOWING ARE FOR THAT PROJECT
+
+MAIN DEVELOPERS:
+Andreas Krennmair <ak@synflood.at>
+
+CONTRIBUTORS:
+Andrea Nall <anall@andreanall.com>
+Daniel Arndt <danielarndt@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
+Guillaume Savary <guillaume@savary.name>
+Mark Smith <mark@qq.is>
+Miek Gieben <miek@miek.nl>
+Mike Bell <mike@mikebell.org>
+Trevor Strohman <strohman@google.com>
diff --git a/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/google/gopacket/CONTRIBUTING.md
new file mode 100644
index 0000000..99ab7a2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/CONTRIBUTING.md
@@ -0,0 +1,215 @@
+Contributing To gopacket
+========================
+
+So you've got some code and you'd like it to be part of gopacket... wonderful!
+We're happy to accept contributions, whether they're fixes to old protocols, new
+protocols entirely, or anything else you think would improve the gopacket
+library.  This document is designed to help you to do just that.
+
+The first section deals with the plumbing:  how to actually get a change
+submitted.
+
+The second section deals with coding style... Go is great in that it
+has a uniform style implemented by 'go fmt', but there's still some decisions
+we've made that go above and beyond, and if you follow them, they won't come up
+in your code review.
+
+The third section deals with some of the implementation decisions we've made,
+which may help you to understand the current code and which we may ask you to
+conform to (or provide compelling reasons for ignoring).
+
+Overall, we hope this document will help you to understand our system and write
+great code which fits in, and help us to turn around on your code review quickly
+so the code can make it into the master branch as quickly as possible.
+
+
+How To Submit Code
+------------------
+
+We use github.com's Pull Request feature to receive code contributions from
+external contributors.  See
+https://help.github.com/articles/creating-a-pull-request/ for details on
+how to create a request.
+
+Also, there's a local script `gc` in the base directory of GoPacket that
+runs a local set of checks, which should give you relatively high confidence
+that your pull won't fail github pull checks.
+
+```sh
+go get github.com/google/gopacket
+cd $GOPATH/src/github.com/google/gopacket
+git checkout -b <mynewfeature>  # create a new branch to work from
+... code code code ...
+./gc  # Run this to do local commits, it performs a number of checks
+```
+
+To sum up:
+
+* DO
+    + Pull down the latest version.
+    + Make a feature-specific branch.
+    + Code using the style and methods discussed in the rest of this document.
+    + Use the ./gc command to do local commits or check correctness.
+    + Push your new feature branch up to github.com, as a pull request.
+    + Handle comments and requests from reviewers, pushing new commits up to
+      your feature branch as problems are addressed.
+    + Put interesting comments and discussions into commit comments.
+* DON'T
+    + Push to someone else's branch without their permission.
+
+
+Coding Style
+------------
+
+* Go code must be run through `go fmt`, `go vet`, and `golint`
+* Follow http://golang.org/doc/effective_go.html as much as possible.
+    + In particular, http://golang.org/doc/effective_go.html#mixed-caps.  Enums
+      should be be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
+      TcpSourcePort or TCP_SOURCE_PORT).
+* Bonus points for giving enum types a String() field.
+* Any exported types or functions should have commentary
+  (http://golang.org/doc/effective_go.html#commentary)
+
+
+Coding Methods And Implementation Notes
+---------------------------------------
+
+### Error Handling
+
+Many times, you'll be decoding a protocol and run across something bad, a packet
+corruption or the like.  How do you handle this?  First off, ALWAYS report the
+error.  You can do this either by returning the error from the decode() function
+(most common), or if you're up for it you can implement and add an ErrorLayer
+through the packet builder (the first method is a simple shortcut that does
+exactly this, then stops any future decoding).
+
+Often, you'll already have decoded some part of your protocol by the time you hit
+your error.  Use your own discretion to determine whether the stuff you've
+already decoded should be returned to the caller or not:
+
+```go
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  prot := &MyProtocol{}
+  if len(data) < 10 {
+    // This error occurred before we did ANYTHING, so there's nothing in my
+    // protocol that the caller could possibly want.  Just return the error.
+    return fmt.Errorf("Length %d less than 10", len(data))
+  }
+  prot.ImportantField1 = data[:5]
+  prot.ImportantField2 = data[5:10]
+  // At this point, we've already got enough information in 'prot' to
+  // warrant returning it to the caller, so we'll add it now.
+  p.AddLayer(prot)
+  if len(data) < 15 {
+    // We encountered an error later in the packet, but the caller already
+    // has the important info we've gleaned so far.
+    return fmt.Errorf("Length %d less than 15", len(data))
+  }
+  prot.ImportantField3 = data[10:15]
+  return nil  // We've already added the layer, we can just return success.
+}
+```
+
+In general, our code follows the approach of returning the first error it
+encounters.  In general, we don't trust any bytes after the first error we see.
+
+### What Is A Layer?
+
+The definition of a layer is up to the discretion of the coder.  It should be
+something important enough that it's actually useful to the caller (IE: every
+TLV value should probably NOT be a layer).  However, it can be more granular
+than a single protocol... IPv6 and SCTP both implement many layers to handle the
+various parts of the protocol.  Use your best judgement, and prepare to defend
+your decisions during code review. ;)
+
+### Performance
+
+We strive to make gopacket as fast as possible while still providing lots of
+features.  In general, this means:
+
+* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
+  others on an as-needed basis (tons of MPLS on your network?  Time to optimize
+  MPLS!)
+* Use fast operations.  See the toplevel benchmark_test for benchmarks of some
+  of Go's underlying features and types.
+* Test your performance changes!  You should use the ./gc script's --benchmark
+  flag to submit any performance-related changes.  Use pcap/gopacket_benchmark
+  to test your change against a PCAP file based on your traffic patterns.
+* Don't be TOO hacky.  Sometimes, removing an unused struct from a field causes
+  a huge performance hit, due to the way that Go currently handles its segmented
+  stack... don't be afraid to clean it up anyway.  We'll trust the Go compiler
+  to get good enough over time to handle this.  Also, this type of
+  compiler-specific optimization is very fragile; someone adding a field to an
+  entirely different struct elsewhere in the codebase could reverse any gains
+  you might achieve by aligning your allocations.
+* Try to minimize memory allocations.  If possible, use []byte to reference
+  pieces of the input, instead of using string, which requires copying the bytes
+  into a new memory allocation.
+* Think hard about what should be evaluated lazily vs. not.  In general, a
+  layer's struct should almost exactly mirror the layer's frame.  Anything
+  that's more interesting should be a function.  This may not always be
+  possible, but it's a good rule of thumb.
+* Don't fear micro-optimizations.  With the above in mind, we welcome
+  micro-optimizations that we think will have positive/neutral impacts on the
+  majority of workloads.  A prime example of this is pre-allocating certain
+  structs within a larger one:
+
+```go
+type MyProtocol struct {
+  // Most packets have 1-4 of VeryCommon, so we preallocate it here.
+  initialAllocation [4]uint32
+  VeryCommon []uint32
+}
+
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  prot := &MyProtocol{}
+  prot.VeryCommon = prot.initialAllocation[:0]
+  for len(data) > 4 {
+    field := binary.BigEndian.Uint32(data[:4])
+    data = data[4:]
+    // Since we're using the underlying initialAllocation, we won't need to
+    // allocate new memory for the following append unless we have more than 16
+    // bytes of data, which should be the uncommon case.
+    prot.VeryCommon = append(prot.VeryCommon, field)
+  }
+  p.AddLayer(prot)
+  if len(data) > 0 {
+    return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
+  }
+  return nil
+}
+```
+
+### Slices And Data
+
+If you're pulling a slice from the data you're decoding, don't copy it.  Just
+use the slice itself.
+
+```go
+type MyProtocol struct {
+  A, B net.IP
+}
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  p.AddLayer(&MyProtocol{
+    A: data[:4],
+    B: data[4:8],
+  })
+  return nil
+}
+```
+
+The caller has already agreed, by using this library, that they won't modify the
+set of bytes they pass in to the decoder, or the library has already copied the
+set of bytes to a read-only location.  See DecodeOptions.NoCopy for more
+information.
+
+### Enums/Types
+
+If a protocol has an integer field (uint8, uint16, etc) with a couple of known
+values that mean something special, make it a type.  This allows us to do really
+nice things like adding a String() function to them, so we can more easily
+display those to users.  Check out layers/enums.go for one example, as well as
+layers/icmp.go for layer-specific enums.
+
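+A minimal sketch of such a typed enum (the names below are purely illustrative
+and not taken from the layers package):
+
+```go
+// FooMessageType is a hypothetical one-byte message-type field.
+type FooMessageType uint8
+
+const (
+  FooMessageTypeRequest  FooMessageType = 1
+  FooMessageTypeResponse FooMessageType = 2
+)
+
+// String makes decoded values much easier to read when printed.
+func (t FooMessageType) String() string {
+  switch t {
+  case FooMessageTypeRequest:
+    return "Request"
+  case FooMessageTypeResponse:
+    return "Response"
+  default:
+    return fmt.Sprintf("UnknownFooMessageType(%d)", uint8(t))
+  }
+}
+```
+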
+When naming things, try for descriptiveness over succinctness.  For example,
+choose DNSResponseRecord over DNSRR.
diff --git a/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/google/gopacket/LICENSE
new file mode 100644
index 0000000..2100d52
--- /dev/null
+++ b/vendor/github.com/google/gopacket/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md
new file mode 100644
index 0000000..a2f48a9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/README.md
@@ -0,0 +1,12 @@
+# GoPacket
+
+This library provides packet decoding capabilities for Go.
+See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
+
+[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
+[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Originally forked from the gopcap project written by Andreas
+Krennmair <ak@synflood.at> (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/google/gopacket/base.go b/vendor/github.com/google/gopacket/base.go
new file mode 100644
index 0000000..797b55f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/base.go
@@ -0,0 +1,178 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// Layer represents a single decoded packet layer (using either the
+// OSI or TCP/IP definition of a layer).  When decoding, a packet's data is
+// broken up into a number of layers.  The caller may call LayerType() to
+// figure out which type of layer they've received from the packet.  Optionally,
+// they may then use a type assertion to get the actual layer type for deep
+// inspection of the data.
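+//
+// For example, an illustrative sketch of that pattern (it assumes the layers
+// subpackage, which is not part of this file):
+//
+//	for _, l := range packet.Layers() {
+//		if l.LayerType() == layers.LayerTypeTCP {
+//			tcp := l.(*layers.TCP) // deep inspection via type assertion
+//			fmt.Println(tcp.SrcPort, tcp.DstPort)
+//		}
+//	}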
+type Layer interface {
+	// LayerType is the gopacket type for this layer.
+	LayerType() LayerType
+	// LayerContents returns the set of bytes that make up this layer.
+	LayerContents() []byte
+	// LayerPayload returns the set of bytes contained within this layer, not
+	// including the layer itself.
+	LayerPayload() []byte
+}
+
+// Payload is a Layer containing the payload of a packet.  The definition of
+// what constitutes the payload of a packet depends on previous layers; for
+// TCP and UDP, we stop decoding above layer 4 and return the remaining
+// bytes as a Payload.  Payload is an ApplicationLayer.
+type Payload []byte
+
+// LayerType returns LayerTypePayload
+func (p Payload) LayerType() LayerType { return LayerTypePayload }
+
+// LayerContents returns the bytes making up this layer.
+func (p Payload) LayerContents() []byte { return []byte(p) }
+
+// LayerPayload returns the payload within this layer.
+func (p Payload) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as bytes.
+func (p Payload) Payload() []byte { return []byte(p) }
+
+// String implements fmt.Stringer.
+func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) }
+
+// GoString implements fmt.GoStringer.
+func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) }
+
+// CanDecode implements DecodingLayer.
+func (p Payload) CanDecode() LayerClass { return LayerTypePayload }
+
+// NextLayerType implements DecodingLayer.
+func (p Payload) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+	*p = Payload(data)
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+	bytes, err := b.PrependBytes(len(p))
+	if err != nil {
+		return err
+	}
+	copy(bytes, p)
+	return nil
+}
+
+// decodePayload decodes data by returning it all in a Payload layer.
+func decodePayload(data []byte, p PacketBuilder) error {
+	payload := &Payload{}
+	if err := payload.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(payload)
+	p.SetApplicationLayer(payload)
+	return nil
+}
+
+// Fragment is a Layer containing a fragment of a larger frame, used by layers
+// like IPv4 and IPv6 that allow for fragmentation of their payloads.
+type Fragment []byte
+
+// LayerType returns LayerTypeFragment
+func (p *Fragment) LayerType() LayerType { return LayerTypeFragment }
+
+// LayerContents implements Layer.
+func (p *Fragment) LayerContents() []byte { return []byte(*p) }
+
+// LayerPayload implements Layer.
+func (p *Fragment) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as a byte slice.
+func (p *Fragment) Payload() []byte { return []byte(*p) }
+
+// String implements fmt.Stringer.
+func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) }
+
+// CanDecode implements DecodingLayer.
+func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment }
+
+// NextLayerType implements DecodingLayer.
+func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+	*p = Fragment(data)
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+	bytes, err := b.PrependBytes(len(*p))
+	if err != nil {
+		return err
+	}
+	copy(bytes, *p)
+	return nil
+}
+
+// decodeFragment decodes data by returning it all in a Fragment layer.
+func decodeFragment(data []byte, p PacketBuilder) error {
+	payload := &Fragment{}
+	if err := payload.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(payload)
+	p.SetApplicationLayer(payload)
+	return nil
+}
+
+// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their
+// corresponding OSI layers, as best as possible.
+
+// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2)
+type LinkLayer interface {
+	Layer
+	LinkFlow() Flow
+}
+
+// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI
+// layer 3)
+type NetworkLayer interface {
+	Layer
+	NetworkFlow() Flow
+}
+
+// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI
+// layer 4)
+type TransportLayer interface {
+	Layer
+	TransportFlow() Flow
+}
+
+// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI
+// layer 7), also known as the packet payload.
+type ApplicationLayer interface {
+	Layer
+	Payload() []byte
+}
+
+// ErrorLayer is a packet layer created when decoding of the packet has failed.
+// Its payload is all the bytes that we were unable to decode, and the returned
+// error details why the decoding failed.
+type ErrorLayer interface {
+	Layer
+	Error() error
+}
diff --git a/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/google/gopacket/decode.go
new file mode 100644
index 0000000..2633f84
--- /dev/null
+++ b/vendor/github.com/google/gopacket/decode.go
@@ -0,0 +1,157 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"errors"
+)
+
+// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata.
+type DecodeFeedback interface {
+	// SetTruncated should be called if during decoding you notice that a packet
+	// is shorter than internal layer variables (HeaderLength, or the like) say it
+	// should be.  It sets packet.Metadata().Truncated.
+	SetTruncated()
+}
+
+type nilDecodeFeedback struct{}
+
+func (nilDecodeFeedback) SetTruncated() {}
+
+// NilDecodeFeedback implements DecodeFeedback by doing nothing.
+var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{}
+
+// PacketBuilder is used by layer decoders to store the layers they've decoded,
+// and to defer future decoding via NextDecoder.
+// Typically, the pattern for use is:
+//  func (m *myDecoder) Decode(data []byte, p PacketBuilder) error {
+//    if myLayer, err := myDecodingLogic(data); err != nil {
+//      return err
+//    } else {
+//      p.AddLayer(myLayer)
+//    }
+//    // maybe do this, if myLayer is a LinkLayer
+//    p.SetLinkLayer(myLayer)
+//    return p.NextDecoder(nextDecoder)
+//  }
+type PacketBuilder interface {
+	DecodeFeedback
+	// AddLayer should be called by a decoder immediately upon successful
+	// decoding of a layer.
+	AddLayer(l Layer)
+	// The following functions set the various specific layers in the final
+	// packet.  Note that if many layers call SetX, the first call is kept and all
+	// other calls are ignored.
+	SetLinkLayer(LinkLayer)
+	SetNetworkLayer(NetworkLayer)
+	SetTransportLayer(TransportLayer)
+	SetApplicationLayer(ApplicationLayer)
+	SetErrorLayer(ErrorLayer)
+	// NextDecoder should be called by a decoder when they're done decoding a
+	// packet layer but not done with decoding the entire packet.  The next
+	// decoder will be called to decode the last AddLayer's LayerPayload.
+	// Because of this, NextDecoder must only be called once all other
+	// PacketBuilder calls have been made.  Set*Layer and AddLayer calls after
+	// NextDecoder calls will behave incorrectly.
+	NextDecoder(next Decoder) error
+	// DumpPacketData is used solely for decoding.  If you come across an error
+	// you need to diagnose while processing a packet, call this and your packet's
+	// data will be dumped to stderr so you can create a test.  This should never
+	// be called from a production decoder.
+	DumpPacketData()
+	// DecodeOptions returns the decode options
+	DecodeOptions() *DecodeOptions
+}
+
+// Decoder is an interface for logic to decode a packet layer.  Users may
+// implement a Decoder to handle their own strange packet types, or may use one
+// of the many decoders available in the 'layers' subpackage to decode things
+// for them.
+type Decoder interface {
+	// Decode decodes the bytes of a packet, sending decoded values and other
+	// information to PacketBuilder, and returning an error if unsuccessful.  See
+	// the PacketBuilder documentation for more details.
+	Decode([]byte, PacketBuilder) error
+}
+
+// DecodeFunc wraps a function to make it a Decoder.
+type DecodeFunc func([]byte, PacketBuilder) error
+
+// Decode implements Decoder by calling itself.
+func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error {
+	// function, call thyself.
+	return d(data, p)
+}
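+
+// As a usage sketch (decodeMyProto is a hypothetical function, not part of this
+// package): any function with this signature can be used wherever a Decoder is
+// expected by wrapping it in DecodeFunc.
+//
+//  func decodeMyProto(data []byte, p PacketBuilder) error {
+//    // ... parse data, call p.AddLayer / p.Set*Layer as appropriate ...
+//    return p.NextDecoder(DecodePayload)
+//  }
+//
+//  var MyProtoDecoder Decoder = DecodeFunc(decodeMyProto)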
+
+// DecodePayload is a Decoder that returns a Payload layer containing all
+// remaining bytes.
+var DecodePayload Decoder = DecodeFunc(decodePayload)
+
+// DecodeUnknown is a Decoder that returns an Unknown layer containing all
+// remaining bytes, useful if you run up against a layer that you're unable to
+// decode yet.  This layer is considered an ErrorLayer.
+var DecodeUnknown Decoder = DecodeFunc(decodeUnknown)
+
+// DecodeFragment is a Decoder that returns a Fragment layer containing all
+// remaining bytes.
+var DecodeFragment Decoder = DecodeFunc(decodeFragment)
+
+// LayerTypeZero is an invalid layer type, but can be used to determine whether
+// layer type has actually been set correctly.
+var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown})
+
+// LayerTypeDecodeFailure is the layer type for the default error layer.
+var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown})
+
+// LayerTypePayload is the layer type for a payload that we don't try to decode
+// but treat as a success, IE: an application-level payload.
+var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload})
+
+// LayerTypeFragment is the layer type for a fragment of a layer transported
+// by an underlying layer that supports fragmentation.
+var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment})
+
+// DecodeFailure is a packet layer created if decoding of the packet data failed
+// for some reason.  It implements ErrorLayer.  LayerContents will be the entire
+// set of bytes that failed to parse, and Error will return the reason parsing
+// failed.
+type DecodeFailure struct {
+	data  []byte
+	err   error
+	stack []byte
+}
+
+// Error returns the error encountered during decoding.
+func (d *DecodeFailure) Error() error { return d.err }
+
+// LayerContents implements Layer.
+func (d *DecodeFailure) LayerContents() []byte { return d.data }
+
+// LayerPayload implements Layer.
+func (d *DecodeFailure) LayerPayload() []byte { return nil }
+
+// String implements fmt.Stringer.
+func (d *DecodeFailure) String() string {
+	return "Packet decoding error: " + d.Error().Error()
+}
+
+// Dump implements Dumper.
+func (d *DecodeFailure) Dump() (s string) {
+	if d.stack != nil {
+		s = string(d.stack)
+	}
+	return
+}
+
+// LayerType returns LayerTypeDecodeFailure
+func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure }
+
+// decodeUnknown "decodes" unsupported data types by returning an error.
+// This decoder will thus always return a DecodeFailure layer.
+func decodeUnknown(data []byte, p PacketBuilder) error {
+	return errors.New("Layer type not currently supported")
+}
diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go
new file mode 100644
index 0000000..8e33e56
--- /dev/null
+++ b/vendor/github.com/google/gopacket/doc.go
@@ -0,0 +1,371 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package gopacket provides packet decoding for the Go language.
+
+gopacket contains many sub-packages with additional functionality you may find
+useful, including:
+
+ * layers: You'll probably use this every time.  This contains the logic
+     built into gopacket for decoding packet protocols.  Note that all example
+     code below assumes that you have imported both gopacket and
+     gopacket/layers.
+ * pcap: C bindings to use libpcap to read packets off the wire.
+ * pfring: C bindings to use PF_RING to read packets off the wire.
+ * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
+ * tcpassembly: TCP stream reassembly
+
+Also, if you're looking to dive right into code, see the examples subdirectory
+for numerous simple binaries built using gopacket libraries.
+
+The minimum Go version required is 1.5, except for pcapgo/EthernetHandle, afpacket,
+and bsdbpf, which need at least 1.7 due to x/sys/unix dependencies.
+
+Basic Usage
+
+gopacket takes in packet data as a []byte and decodes it into a packet with
+a non-zero number of "layers".  Each layer corresponds to a protocol
+within the bytes.  Once a packet has been decoded, the layers of the packet
+can be requested from the packet.
+
+ // Decode a packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ // Get the TCP layer from this packet
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+   fmt.Println("This is a TCP packet!")
+   // Get actual TCP data from this layer
+   tcp, _ := tcpLayer.(*layers.TCP)
+   fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
+ }
+ // Iterate over all layers, printing out each layer type
+ for _, layer := range packet.Layers() {
+   fmt.Println("PACKET LAYER:", layer.LayerType())
+ }
+
+Packets can be decoded from a number of starting points.  Many of our base
+types implement Decoder, which allow us to decode packets for which
+we don't have full data.
+
+ // Decode an ethernet packet
+ ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
+ // Decode an IPv6 header and everything it contains
+ ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
+ // Decode a TCP header and its payload
+ tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
+
+
+Reading Packets From A Source
+
+Most of the time, you won't just have a []byte of packet data lying around.
+Instead, you'll want to read packets in from somewhere (file, interface, etc)
+and process them.  To do that, you'll want to build a PacketSource.
+
+First, you'll need to construct an object that implements the PacketDataSource
+interface.  There are implementations of this interface bundled with gopacket
+in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
+for more information on their usage.  Once you have a PacketDataSource, you can
+pass it into NewPacketSource, along with a Decoder of your choice, to create
+a PacketSource.
+
+Once you have a PacketSource, you can read packets from it in multiple ways.
+See the docs for PacketSource for more details.  The easiest method is the
+Packets function, which returns a channel, then asynchronously writes new
+packets into that channel, closing the channel if the packetSource hits an
+end-of-file.
+
+  packetSource := ...  // construct using pcap or pfring
+  for packet := range packetSource.Packets() {
+    handlePacket(packet)  // do something with each packet
+  }
+
+You can change the decoding options of the packetSource by setting fields in
+packetSource.DecodeOptions... see the following sections for more details.
+
+
+Lazy Decoding
+
+gopacket optionally decodes packet data lazily, meaning it
+only decodes a packet layer when it needs to handle a function call.
+
+ // Create a packet, but don't actually decode anything yet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Now, decode the packet up to the first IPv4 layer found but no further.
+ // If no IPv4 layer was found, the whole packet will be decoded looking for
+ // it.
+ ip4 := packet.Layer(layers.LayerTypeIPv4)
+ // Decode all layers and return them.  The layers up to the first IPv4 layer
+ // are already decoded, and will not require decoding a second time.
+ layers := packet.Layers()
+
+Lazily-decoded packets are not concurrency-safe.  Since layers have not all been
+decoded, each call to Layer() or Layers() has the potential to mutate the packet
+in order to decode the next layer.  If a packet is used
+in multiple goroutines concurrently, don't use gopacket.Lazy.  Then gopacket
+will decode the packet fully, and all future function calls won't mutate the
+object.
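+
+A sketch of the safe pattern for concurrent use (inspect here is a placeholder
+for a read-only handler of your own):
+
+ // Decode eagerly with gopacket.Default; the fully-decoded packet can then be
+ // shared between goroutines without further mutation.
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ for i := 0; i < 4; i++ {
+   go inspect(packet)
+ }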
+
+
+NoCopy Decoding
+
+By default, gopacket will copy the slice passed to NewPacket and store the
+copy within the packet, so future mutations to the bytes underlying the slice
+don't affect the packet and its layers.  If you can guarantee that the
+underlying slice bytes won't be changed, you can use NoCopy to tell
+gopacket.NewPacket, and it'll use the passed-in slice itself.
+
+ // This channel returns new byte slices, each of which points to a new
+ // memory location that's guaranteed immutable for the duration of the
+ // packet.
+ for data := range myByteSliceChannel {
+   p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+   doSomethingWithPacket(p)
+ }
+
+The fastest method of decoding is to use both Lazy and NoCopy, but note from
+the many caveats above that for some implementations either or both may be
+dangerous.
+
+
+Pointers To Known Layers
+
+During decoding, certain layers are stored in the packet as well-known
+layer types.  For example, IPv4 and IPv6 are both considered NetworkLayer
+layers, while TCP and UDP are both TransportLayer layers.  We support 4
+layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
+analogous to layers 2, 3, 4, and 7 of the OSI model).  To access these,
+you can use the packet.LinkLayer, packet.NetworkLayer,
+packet.TransportLayer, and packet.ApplicationLayer functions.  Each of
+these functions returns a corresponding interface
+(gopacket.{Link,Network,Transport,Application}Layer).  The first three
+provide methods for getting src/dst addresses for that particular layer,
+while the final layer provides a Payload function to get payload data.
+This is helpful, for example, to get payloads for all packets regardless
+of their underlying data type:
+
+ // Get packets from some source
+ for packet := range someSource {
+   if app := packet.ApplicationLayer(); app != nil {
+     if strings.Contains(string(app.Payload()), "magic string") {
+       fmt.Println("Found magic string in a packet!")
+     }
+   }
+ }
+
+A particularly useful layer is ErrorLayer, which is set whenever there's
+an error parsing part of the packet.
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ if err := packet.ErrorLayer(); err != nil {
+   fmt.Println("Error decoding some part of the packet:", err)
+ }
+
+Note that we don't return an error from NewPacket because we may have decoded
+a number of layers successfully before running into our erroneous layer.  You
+may still be able to get your Ethernet and IPv4 layers correctly, even if
+your TCP layer is malformed.
+
+
+Flow And Endpoint
+
+gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
+independent manner the fact that a packet is coming from A and going to B.
+The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
+methods for extracting their flow information, without worrying about the type
+of the underlying Layer.
+
+A Flow is a simple object made up of a set of two Endpoints, one source and one
+destination.  It details the sender and receiver of the Layer of the Packet.
+
+An Endpoint is a hashable representation of a source or destination.  For
+example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
+IP packet.  A Flow can be broken into Endpoints, and Endpoints can be combined
+into Flows:
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ netFlow := packet.NetworkLayer().NetworkFlow()
+ src, dst := netFlow.Endpoints()
+ reverseFlow, _ := gopacket.FlowFromEndpoints(dst, src)
+
+Both Endpoint and Flow objects can be used as map keys, and the equality
+operator can compare them, so you can easily group together all packets
+based on endpoint criteria:
+
+ flows := map[gopacket.Endpoint]chan gopacket.Packet{}
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Send all TCP packets to channels based on their destination port.
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+   tcp := tcpLayer.(*layers.TCP)
+   flows[tcp.TransportFlow().Dst()] <- packet
+ }
+ // Look for all packets with the same source and destination network address
+ if net := packet.NetworkLayer(); net != nil {
+   src, dst := net.NetworkFlow().Endpoints()
+   if src == dst {
+     fmt.Printf("Fishy packet has same network source and dst: %s\n", src)
+   }
+ }
+ // Find all packets coming from UDP port 1000 to UDP port 500
+ interestingFlow, _ := gopacket.FlowFromEndpoints(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+ if t := packet.TransportLayer(); t != nil && t.TransportFlow() == interestingFlow {
+   fmt.Println("Found that UDP flow I was looking for!")
+ }
+
+For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
+which provide quick, non-cryptographic hashes of their contents.  Of particular
+importance is the fact that Flow FastHash() is symmetric: A->B will have the same
+hash as B->A.  An example usage could be:
+
+ channels := [8]chan gopacket.Packet{}
+ for i := 0; i < 8; i++ {
+   channels[i] = make(chan gopacket.Packet)
+   go packetHandler(channels[i])
+ }
+ for packet := range getPackets() {
+   if net := packet.NetworkLayer(); net != nil {
+     channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
+   }
+ }
+
+This allows us to split up a packet stream while still making sure that each
+stream sees all packets for a flow (and its bidirectional opposite).
+
+
+Implementing Your Own Decoder
+
+If your network has some strange encapsulation, you can implement your own
+decoder.  In this example, we handle Ethernet packets which are encapsulated
+in a 4-byte header.
+
+ // Create a layer type, should be unique and high, so it doesn't conflict,
+ // giving it a name and a decoder to use.
+ var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
+
+ // Implement my layer
+ type MyLayer struct {
+   StrangeHeader []byte
+   payload []byte
+ }
+ func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
+ func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
+ func (m MyLayer) LayerPayload() []byte { return m.payload }
+
+ // Now implement a decoder... this one strips off the first 4 bytes of the
+ // packet.
+ func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
+   // Create my layer
+   p.AddLayer(&MyLayer{data[:4], data[4:]})
+   // Determine how to handle the rest of the packet
+   return p.NextDecoder(layers.LayerTypeEthernet)
+ }
+
+ // Finally, decode your packets:
+ p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
+
+See the docs for Decoder and PacketBuilder for more details on how coding
+decoders works, or look at RegisterLayerType and RegisterEndpointType to see how
+to add layer/endpoint types to gopacket.
+
+
+Fast Decoding With DecodingLayerParser
+
+TLDR:  DecodingLayerParser takes about 10% of the time that NewPacket takes to
+decode packet data, but only for known packet stacks.
+
+Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
+due to its need to allocate a new packet and every respective layer.  It's very
+versatile and can handle all known layer types, but sometimes you really only
+care about a specific set of layers regardless, so that versatility is wasted.
+
+DecodingLayerParser avoids memory allocation altogether by decoding packet
+layers directly into preallocated objects, which you can then reference to get
+the packet's information.  A quick example:
+
+ func main() {
+   var eth layers.Ethernet
+   var ip4 layers.IPv4
+   var ip6 layers.IPv6
+   var tcp layers.TCP
+   parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp)
+   decoded := []gopacket.LayerType{}
+   for packetData := range somehowGetPacketData() {
+     if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+       fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+       continue
+     }
+     for _, layerType := range decoded {
+       switch layerType {
+         case layers.LayerTypeIPv6:
+           fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+         case layers.LayerTypeIPv4:
+           fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+       }
+     }
+   }
+ }
+
+The important thing to note here is that the parser is modifying the passed in
+layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
+speeding up the decoding process.  It's even branching based on layer type...
+it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack.  However, it won't
+handle any other type... since no other decoders were passed in, an (eth, ip4,
+udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
+LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
+decode a UDP packet).
+
+Unfortunately, not all layers can be used by DecodingLayerParser... only those
+implementing the DecodingLayer interface are usable.  Also, it's possible to
+create DecodingLayers that are not themselves Layers... see
+layers.IPv6ExtensionSkipper for an example of this.
+
+
+Creating Packet Data
+
+As well as offering the ability to decode packet data, gopacket will allow you
+to create packets from scratch, as well.  A number of gopacket layers implement
+the SerializableLayer interface; these layers can be serialized to a []byte in
+the following manner:
+
+  ip := &layers.IPv4{
+    SrcIP: net.IP{1, 2, 3, 4},
+    DstIP: net.IP{5, 6, 7, 8},
+    // etc...
+  }
+  buf := gopacket.NewSerializeBuffer()
+  opts := gopacket.SerializeOptions{}  // See SerializeOptions for more details.
+  err := ip.SerializeTo(buf, opts)
+  if err != nil { panic(err) }
+  fmt.Println(buf.Bytes())  // prints out a byte slice containing the serialized IPv4 layer.
+
+SerializeTo PREPENDS the given layer onto the SerializeBuffer, treating the
+current buffer's Bytes() slice as the payload of the serializing layer.
+Therefore, you can serialize an entire packet by serializing a set of layers in
+reverse order (Payload, then TCP, then IP, then Ethernet, for example).  The
+SerializeBuffer's SerializeLayers function is a helper that does exactly that.
+
+To generate an (empty and useless, because no fields are set)
+Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
+
+  buf := gopacket.NewSerializeBuffer()
+  opts := gopacket.SerializeOptions{}
+  gopacket.SerializeLayers(buf, opts,
+    &layers.Ethernet{},
+    &layers.IPv4{},
+    &layers.TCP{},
+    gopacket.Payload([]byte{1, 2, 3, 4}))
+  packetData := buf.Bytes()
+
+A Final Note
+
+If you use gopacket, you'll almost definitely want to make sure gopacket/layers
+is imported, since when imported it sets all the LayerType variables and fills
+in a lot of interesting variables/maps (DecodersByLayerName, etc).  Therefore,
+it's recommended that even if you don't use any layers functions directly, you still import with:
+
+  import (
+    _ "github.com/google/gopacket/layers"
+  )
+*/
+package gopacket
diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go
new file mode 100644
index 0000000..a00c883
--- /dev/null
+++ b/vendor/github.com/google/gopacket/flows.go
@@ -0,0 +1,236 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
+//
+// Endpoints/Flows have a problem:  They need to be hashable.  Therefore, they
+// can't use a byte slice.  The two obvious choices are to use a string or a
+// byte array.  Strings work great, but string creation requires memory
+// allocation, which can be slow.  Arrays work great, but have a fixed size.  We
+// originally used the former, now we've switched to the latter.  Use of a fixed
+// byte-array doubles the speed of constructing a flow (due to not needing to
+// allocate).  This is a huge increase... too much for us to pass up.
+//
+// The end result of this, though, is that an endpoint/flow can't be created
+// using more than MaxEndpointSize bytes per address.
+const MaxEndpointSize = 16
+
+// Endpoint is the set of bytes used to address packets at various layers.
+// See LinkLayer, NetworkLayer, and TransportLayer specifications.
+// Endpoints are usable as map keys.
+type Endpoint struct {
+	typ EndpointType
+	len int
+	raw [MaxEndpointSize]byte
+}
+
+// EndpointType returns the endpoint type associated with this endpoint.
+func (a Endpoint) EndpointType() EndpointType { return a.typ }
+
+// Raw returns the raw bytes of this endpoint.  These aren't human-readable
+// most of the time, but they are faster than calling String.
+func (a Endpoint) Raw() []byte { return a.raw[:a.len] }
+
+// LessThan provides a stable ordering for all endpoints.  It sorts first based
+// on the EndpointType of an endpoint, then based on the raw bytes of that
+// endpoint.
+//
+// For some endpoints, the actual comparison may not make sense, however this
+// ordering does provide useful information for most Endpoint types.
+// Ordering is based first on endpoint type, then on raw endpoint bytes.
+// Endpoint bytes are sorted lexicographically.
+func (a Endpoint) LessThan(b Endpoint) bool {
+	return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0)
+}
+
+// fnvHash is used by our FastHash functions, and implements the FNV hash
+// created by Glenn Fowler, Landon Curt Noll, and Phong Vo.
+// See http://isthe.com/chongo/tech/comp/fnv/.
+func fnvHash(s []byte) (h uint64) {
+	h = fnvBasis
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= fnvPrime
+	}
+	return
+}
+
+const fnvBasis = 14695981039346656037
+const fnvPrime = 1099511628211
+
+// FastHash provides a quick hashing function for an endpoint, useful if you'd
+// like to split up endpoints by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (a Endpoint) FastHash() (h uint64) {
+	h = fnvHash(a.raw[:a.len])
+	h ^= uint64(a.typ)
+	h *= fnvPrime
+	return
+}
+
+// NewEndpoint creates a new Endpoint object.
+//
+// The size of raw must not exceed MaxEndpointSize, otherwise this function
+// will panic.
+func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) {
+	e.len = len(raw)
+	if e.len > MaxEndpointSize {
+		panic("raw byte length greater than MaxEndpointSize")
+	}
+	e.typ = typ
+	copy(e.raw[:], raw)
+	return
+}
+
+// EndpointTypeMetadata is used to register a new endpoint type.
+type EndpointTypeMetadata struct {
+	// Name is the string returned by an EndpointType's String function.
+	Name string
+	// Formatter is called from an Endpoint's String function to format the raw
+	// bytes in an Endpoint into a human-readable string.
+	Formatter func([]byte) string
+}
+
+// EndpointType is the type of a gopacket Endpoint.  This type determines how
+// the bytes stored in the endpoint should be interpreted.
+type EndpointType int64
+
+var endpointTypes = map[EndpointType]EndpointTypeMetadata{}
+
+// RegisterEndpointType creates a new EndpointType and registers it globally.
+// It MUST be passed a unique number, or it will panic.  Numbers 0-999 are
+// reserved for gopacket's use.
+func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType {
+	t := EndpointType(num)
+	if _, ok := endpointTypes[t]; ok {
+		panic("Endpoint type number already in use")
+	}
+	endpointTypes[t] = meta
+	return t
+}
+
+func (e EndpointType) String() string {
+	if t, ok := endpointTypes[e]; ok {
+		return t.Name
+	}
+	return strconv.Itoa(int(e))
+}
+
+func (a Endpoint) String() string {
+	if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil {
+		return t.Formatter(a.raw[:a.len])
+	}
+	return fmt.Sprintf("%v:%v", a.typ, a.raw)
+}
+
+// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint.
+// Flows are usable as map keys.
+type Flow struct {
+	typ        EndpointType
+	slen, dlen int
+	src, dst   [MaxEndpointSize]byte
+}
+
+// FlowFromEndpoints creates a new flow by pasting together two endpoints.
+// The endpoints must have the same EndpointType, or this function will return
+// an error.
+func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) {
+	if src.typ != dst.typ {
+		err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ)
+		return
+	}
+	return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil
+}
+
+// FastHash provides a quick hashing function for a flow, useful if you'd
+// like to split up flows by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide
+// with its reverse flow.  IE: the flow A->B will have the same hash as the flow
+// B->A.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (f Flow) FastHash() (h uint64) {
+	// This combination must be commutative.  We don't use ^, since that would
+	// give the same hash for all A->A flows.
+	h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen])
+	h ^= uint64(f.typ)
+	h *= fnvPrime
+	return
+}
+
+// String returns a human-readable representation of this flow, in the form
+// "Src->Dst"
+func (f Flow) String() string {
+	s, d := f.Endpoints()
+	return fmt.Sprintf("%v->%v", s, d)
+}
+
+// EndpointType returns the EndpointType for this Flow.
+func (f Flow) EndpointType() EndpointType {
+	return f.typ
+}
+
+// Endpoints returns the two Endpoints for this flow.
+func (f Flow) Endpoints() (src, dst Endpoint) {
+	return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst}
+}
+
+// Src returns the source Endpoint for this flow.
+func (f Flow) Src() (src Endpoint) {
+	src, _ = f.Endpoints()
+	return
+}
+
+// Dst returns the destination Endpoint for this flow.
+func (f Flow) Dst() (dst Endpoint) {
+	_, dst = f.Endpoints()
+	return
+}
+
+// Reverse returns a new flow with endpoints reversed.
+func (f Flow) Reverse() Flow {
+	return Flow{f.typ, f.dlen, f.slen, f.dst, f.src}
+}
+
+// NewFlow creates a new flow.
+//
+// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will
+// panic.
+func NewFlow(t EndpointType, src, dst []byte) (f Flow) {
+	f.slen = len(src)
+	f.dlen = len(dst)
+	if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize {
+		panic("flow raw byte length greater than MaxEndpointSize")
+	}
+	f.typ = t
+	copy(f.src[:], src)
+	copy(f.dst[:], dst)
+	return
+}
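+
+// A short usage sketch (the raw 4-byte addresses and the layers.EndpointIPv4
+// endpoint type are illustrative; any registered EndpointType and addresses of
+// at most MaxEndpointSize bytes work):
+//
+//  f := gopacket.NewFlow(layers.EndpointIPv4, []byte{10, 0, 0, 1}, []byte{10, 0, 0, 2})
+//  src, dst := f.Endpoints()   // split the flow back into its two Endpoints
+//  back := f.Reverse()         // the dst->src flow
+//  bucket := f.FastHash() % 8  // symmetric: equal to back.FastHash() % 8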
+
+// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints
+// that are specified incorrectly during creation.
+var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string {
+	return fmt.Sprintf("%v", b)
+}})
+
+// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid.
+var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil)
+
+// InvalidFlow is a singleton Flow of type EndpointInvalid.
+var InvalidFlow = NewFlow(EndpointInvalid, nil, nil)
diff --git a/vendor/github.com/google/gopacket/gc b/vendor/github.com/google/gopacket/gc
new file mode 100644
index 0000000..b1d8d2e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/gc
@@ -0,0 +1,288 @@
+#!/bin/bash
+# Copyright 2012 Google, Inc. All rights reserved.
+
+# This script provides a simple way to run benchmarks against previous code and
+# keep a log of how benchmarks change over time.  When used with the --benchmark
+# flag, it runs benchmarks from the current code and from the last commit run
+# with --benchmark, then stores the results in the git commit description.  We
+# rerun the old benchmarks along with the new ones, since there's no guarantee
+# that git commits will happen on the same machine, so machine differences could
+# cause wildly inaccurate results.
+#
+# If you're making changes to 'gopacket' which could cause performance changes,
+# you may be requested to use this commit script to make sure your changes don't
+# have large detrimental effects (or to show off how awesome your performance
+# improvements are).
+#
+# If not run with the --benchmark flag, this script is still very useful... it
+# makes sure all the correct go formatting, building, and testing work as
+# expected.
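+#
+# Example invocations (illustrative):
+#   ./gc -a                            # gofmt, build, test, then 'git commit -a'
+#   ./gc --benchmark 'Benchmark.*' -a  # also run and record benchmark comparisons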
+
+function Usage {
+  cat <<EOF
+USAGE:  $0 [--benchmark regexp] [--root] [--gen] <git commit flags...>
+
+--benchmark:  Run benchmark comparisons against last benchmark'd commit
+--root:  Run tests that require root privileges
+--gen:  Generate code for MACs/ports by pulling down external data
+
+Note, some 'git commit' flags are necessary, if all else fails, pass in -a
+EOF
+  exit 1
+}
+
+BENCH=""
+GEN=""
+ROOT=""
+while [ ! -z "$1" ]; do
+  case "$1" in
+    "--benchmark")
+      BENCH="$2"
+      shift
+      shift
+      ;;
+    "--gen")
+      GEN="yes"
+      shift
+      ;;
+    "--root")
+      ROOT="yes"
+      shift
+      ;;
+    "--help")
+      Usage
+      ;;
+    "-h")
+      Usage
+      ;;
+    "help")
+      Usage
+      ;;
+    *)
+      break
+      ;;
+  esac
+done
+
+function Root {
+  if [ ! -z "$ROOT" ]; then
+    local exec="$1"
+    # Some folks (like me) keep source code in places inaccessible by root (like
+    # NFS), so to make sure things run smoothly we copy them to a /tmp location.
+    local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
+    echo "Running root test executable $exec as $tmpfile"
+    cp "$exec" "$tmpfile"
+    chmod a+x "$tmpfile"
+    shift
+    sudo "$tmpfile" "$@"
+  fi
+}
+
+if [ "$#" -eq "0" ]; then
+  Usage
+fi
+
+cd $(dirname $0)
+
+# Check for copyright notices.
+for filename in $(find ./ -type f -name '*.go'); do
+  if ! head -n 1 "$filename" | grep -q Copyright; then
+    echo "File '$filename' may not have copyright notice"
+    exit 1
+  fi
+done
+
+set -e
+set -x
+
+if [ ! -z "$ROOT" ]; then
+  echo "Running SUDO to get root privileges for root tests"
+  sudo echo "have root"
+fi
+
+if [ ! -z "$GEN" ]; then
+  pushd macs
+  go run gen.go | gofmt > valid_mac_prefixes.go
+  popd
+  pushd layers
+  go run gen.go | gofmt > iana_ports.go
+  go run gen2.go | gofmt > enums_generated.go
+  popd
+fi
+
+# Make sure everything is formatted, compiles, and tests pass.
+go fmt ./...
+go test -i ./... 2>/dev/null >/dev/null || true
+go test
+go build
+pushd examples/bytediff
+go build
+popd
+if [ -f /usr/include/pcap.h ]; then
+  pushd pcap
+  go test ./...
+  go build ./...
+  go build pcap_tester.go
+  Root pcap_tester --mode=basic
+  Root pcap_tester --mode=filtered
+  Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
+  popd
+  pushd examples/afpacket
+  go build
+  popd
+  pushd examples/pcapdump
+  go build
+  popd
+  pushd examples/arpscan
+  go build
+  popd
+  pushd examples/bidirectional
+  go build
+  popd
+  pushd examples/synscan
+  go build
+  popd
+  pushd examples/httpassembly
+  go build
+  popd
+  pushd examples/statsassembly
+  go build
+  popd
+fi
+pushd macs
+go test ./...
+gofmt -w gen.go
+go build gen.go
+popd
+pushd tcpassembly
+go test ./...
+popd
+pushd reassembly
+go test ./...
+popd
+pushd layers
+gofmt -w gen.go
+go build gen.go
+go test ./...
+popd
+pushd pcapgo
+go test ./...
+go build ./...
+popd
+if [ -f /usr/include/linux/if_packet.h ]; then
+  if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
+    pushd afpacket
+    go build ./...
+    go test ./...
+    popd
+  fi
+fi
+if [ -f /usr/include/pfring.h ]; then
+  pushd pfring
+  go test ./...
+  go build ./...
+  popd
+  pushd examples/pfdump
+  go build
+  popd
+fi
+pushd ip4defrag
+go test ./...
+popd
+pushd defrag
+go test ./...
+popd
+
+for travis_script in `ls .travis.*.sh`; do
+  ./$travis_script
+done
+
+# Run our initial commit
+git commit "$@"
+
+if [ -z "$BENCH" ]; then
+  set +x
+  echo "We're not benchmarking and we've committed... we're done!"
+  exit
+fi
+
+### If we get here, we want to run benchmarks from current commit, and compare
+### then to benchmarks from the last --benchmark commit.
+
+# Get our current branch.
+BRANCH="$(git branch | grep '^*' | awk '{print $2}')"
+
+# File we're going to build our commit description in.
+COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"
+
+# Add the word "BENCH" to the start of the git commit.
+echo -n "BENCH " > $COMMIT_FILE
+
+# Get the current description... there must be an easier way.
+git log -n 1 | grep '^ ' | sed 's/^    //' >> $COMMIT_FILE
+
+# Get the commit sha for the last benchmark commit
+PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')
+
+## Run current benchmarks
+
+cat >> $COMMIT_FILE <<EOF
+
+
+----------------------------------------------------------
+BENCHMARK_MARKER_DO_NOT_CHANGE
+----------------------------------------------------------
+
+Go version $(go version)
+
+
+TEST BENCHMARKS "$BENCH"
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+PCAP BENCHMARK
+EOF
+if [ "$BENCH" = ".*" ]; then
+  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset to last benchmark commit, run benchmarks
+
+git checkout $PREV
+
+cat >> $COMMIT_FILE <<EOF
+----------------------------------------------------------
+BENCHMARKING AGAINST COMMIT $PREV
+----------------------------------------------------------
+
+
+OLD TEST BENCHMARKS
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+OLD PCAP BENCHMARK
+EOF
+if [ "$BENCH" = ".*" ]; then
+  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset back to the most recent commit, edit the commit message by appending
+## benchmark results.
+git checkout $BRANCH
+git commit --amend -F $COMMIT_FILE
diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod
new file mode 100644
index 0000000..99e99f4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.mod
@@ -0,0 +1,8 @@
+module github.com/google/gopacket
+
+go 1.12
+
+require (
+	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+	golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67
+)
diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum
new file mode 100644
index 0000000..2b28942
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.sum
@@ -0,0 +1,7 @@
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/google/gopacket/layerclass.go
new file mode 100644
index 0000000..775cd09
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layerclass.go
@@ -0,0 +1,107 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+// LayerClass is a set of LayerTypes, used for grabbing one of a number of
+// different types from a packet.
+type LayerClass interface {
+	// Contains returns true if the given layer type should be considered part
+	// of this layer class.
+	Contains(LayerType) bool
+	// LayerTypes returns the set of all layer types in this layer class.
+	// Note that this may not be a fast operation on all LayerClass
+	// implementations.
+	LayerTypes() []LayerType
+}
+
+// Contains implements LayerClass.
+func (l LayerType) Contains(a LayerType) bool {
+	return l == a
+}
+
+// LayerTypes implements LayerClass.
+func (l LayerType) LayerTypes() []LayerType {
+	return []LayerType{l}
+}
+
+// LayerClassSlice implements a LayerClass with a slice.
+type LayerClassSlice []bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (s LayerClassSlice) Contains(t LayerType) bool {
+	return int(t) < len(s) && s[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassSlice.
+// Because of LayerClassSlice's implementation, this could be quite slow.
+func (s LayerClassSlice) LayerTypes() (all []LayerType) {
+	for i := 0; i < len(s); i++ {
+		if s[i] {
+			all = append(all, LayerType(i))
+		}
+	}
+	return
+}
+
+// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of
+// size max(types) and setting slice[t] to true for each type t.  Note, if
+// you implement your own LayerType and give it a high value, this WILL create
+// a very large slice.
+func NewLayerClassSlice(types []LayerType) LayerClassSlice {
+	var max LayerType
+	for _, typ := range types {
+		if typ > max {
+			max = typ
+		}
+	}
+	t := make([]bool, int(max+1))
+	for _, typ := range types {
+		t[typ] = true
+	}
+	return t
+}
+
+// LayerClassMap implements a LayerClass with a map.
+type LayerClassMap map[LayerType]bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (m LayerClassMap) Contains(t LayerType) bool {
+	return m[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassMap.
+func (m LayerClassMap) LayerTypes() (all []LayerType) {
+	for t := range m {
+		all = append(all, t)
+	}
+	return
+}
+
+// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each
+// type in types.
+func NewLayerClassMap(types []LayerType) LayerClassMap {
+	m := LayerClassMap{}
+	for _, typ := range types {
+		m[typ] = true
+	}
+	return m
+}
+
+// NewLayerClass creates a LayerClass, attempting to be smart about which type
+// it creates based on which types are passed in.
+func NewLayerClass(types []LayerType) LayerClass {
+	for _, typ := range types {
+		if typ > maxLayerType {
+			// NewLayerClassSlice could create a very large object, so instead create
+			// a map.
+			return NewLayerClassMap(types)
+		}
+	}
+	return NewLayerClassSlice(types)
+}
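+
+// A short usage sketch (the layer types come from the layers subpackage and
+// "packet" is a previously decoded gopacket.Packet):
+//
+//  ipClass := gopacket.NewLayerClass([]gopacket.LayerType{
+//    layers.LayerTypeIPv4, layers.LayerTypeIPv6,
+//  })
+//  if net := packet.NetworkLayer(); net != nil && ipClass.Contains(net.LayerType()) {
+//    // handle either IP version
+//  }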
diff --git a/vendor/github.com/google/gopacket/layers/.lint_blacklist b/vendor/github.com/google/gopacket/layers/.lint_blacklist
new file mode 100644
index 0000000..fded4f6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/.lint_blacklist
@@ -0,0 +1,39 @@
+dot11.go
+eap.go
+endpoints.go
+enums_generated.go
+enums.go
+ethernet.go
+geneve.go
+icmp4.go
+icmp6.go
+igmp.go
+ip4.go
+ip6.go
+layertypes.go
+linux_sll.go
+llc.go
+lldp.go
+mpls.go
+ndp.go
+ntp.go
+ospf.go
+pflog.go
+pppoe.go
+prism.go
+radiotap.go
+rudp.go
+sctp.go
+sflow.go
+tcp.go
+tcpip.go
+tls.go
+tls_alert.go
+tls_appdata.go
+tls_cipherspec.go
+tls_hanshake.go
+tls_test.go
+udp.go
+udplite.go
+usb.go
+vrrp.go
diff --git a/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/google/gopacket/layers/arp.go
new file mode 100644
index 0000000..49e05ac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/arp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// Potential values for ARP.Operation.
+const (
+	ARPRequest = 1
+	ARPReply   = 2
+)
+
+// ARP is a ARP packet header.
+type ARP struct {
+	BaseLayer
+	AddrType          LinkType
+	Protocol          EthernetType
+	HwAddressSize     uint8
+	ProtAddressSize   uint8
+	Operation         uint16
+	SourceHwAddress   []byte
+	SourceProtAddress []byte
+	DstHwAddress      []byte
+	DstProtAddress    []byte
+}
+
+// LayerType returns LayerTypeARP
+func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2]))
+	arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	arp.HwAddressSize = data[4]
+	arp.ProtAddressSize = data[5]
+	arp.Operation = binary.BigEndian.Uint16(data[6:8])
+	arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize]
+	arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize]
+	arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize]
+	arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize]
+
+	arpLength := 8 + 2*arp.HwAddressSize + 2*arp.ProtAddressSize
+	arp.Contents = data[:arpLength]
+	arp.Payload = data[arpLength:]
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress)
+	bytes, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	if opts.FixLengths {
+		if len(arp.SourceHwAddress) != len(arp.DstHwAddress) {
+			return errors.New("mismatched hardware address sizes")
+		}
+		arp.HwAddressSize = uint8(len(arp.SourceHwAddress))
+		if len(arp.SourceProtAddress) != len(arp.DstProtAddress) {
+			return errors.New("mismatched prot address sizes")
+		}
+		arp.ProtAddressSize = uint8(len(arp.SourceProtAddress))
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol))
+	bytes[4] = arp.HwAddressSize
+	bytes[5] = arp.ProtAddressSize
+	binary.BigEndian.PutUint16(bytes[6:], arp.Operation)
+	start := 8
+	for _, addr := range [][]byte{
+		arp.SourceHwAddress,
+		arp.SourceProtAddress,
+		arp.DstHwAddress,
+		arp.DstProtAddress,
+	} {
+		copy(bytes[start:], addr)
+		start += len(addr)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (arp *ARP) CanDecode() gopacket.LayerClass {
+	return LayerTypeARP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (arp *ARP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeARP(data []byte, p gopacket.PacketBuilder) error {
+
+	arp := &ARP{}
+	return decodingLayerDecoder(arp, data, p)
+}
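+
+// A serialization sketch (the hardware and protocol addresses are illustrative;
+// with FixLengths set, SerializeTo derives HwAddressSize and ProtAddressSize
+// from the address slices):
+//
+//  arp := &layers.ARP{
+//    AddrType:          layers.LinkTypeEthernet,
+//    Protocol:          layers.EthernetTypeIPv4,
+//    Operation:         layers.ARPRequest,
+//    SourceHwAddress:   []byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
+//    SourceProtAddress: []byte{10, 0, 0, 1},
+//    DstHwAddress:      []byte{0, 0, 0, 0, 0, 0},
+//    DstProtAddress:    []byte{10, 0, 0, 2},
+//  }
+//  buf := gopacket.NewSerializeBuffer()
+//  err := arp.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true})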
diff --git a/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/google/gopacket/layers/base.go
new file mode 100644
index 0000000..cd59b46
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/base.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// BaseLayer is a convenience struct which implements the LayerData and
+// LayerPayload functions of the Layer interface.
+type BaseLayer struct {
+	// Contents is the set of bytes that make up this layer.  IE: for an
+	// Ethernet packet, this would be the set of bytes making up the
+	// Ethernet frame.
+	Contents []byte
+	// Payload is the set of bytes contained by (but not part of) this
+	// Layer.  Again, to take Ethernet as an example, this would be the
+	// set of bytes encapsulated by the Ethernet protocol.
+	Payload []byte
+}
+
+// LayerContents returns the bytes of the packet layer.
+func (b *BaseLayer) LayerContents() []byte { return b.Contents }
+
+// LayerPayload returns the bytes contained within the packet layer.
+func (b *BaseLayer) LayerPayload() []byte { return b.Payload }
+
+type layerDecodingLayer interface {
+	gopacket.Layer
+	DecodeFromBytes([]byte, gopacket.DecodeFeedback) error
+	NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	next := d.NextLayerType()
+	if next == gopacket.LayerTypeZero {
+		return nil
+	}
+	return p.NextDecoder(next)
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [1024]byte
diff --git a/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/google/gopacket/layers/bfd.go
new file mode 100644
index 0000000..43030fb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/bfd.go
@@ -0,0 +1,481 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// BFD Control Packet Format
+// -------------------------
+// The current version of BFD's RFC (RFC 5880) contains the following
+// diagram for the BFD Control packet format:
+//
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |Vers |  Diag   |Sta|P|F|C|A|D|M|  Detect Mult  |    Length     |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                       My Discriminator                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                      Your Discriminator                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                    Desired Min TX Interval                    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                   Required Min RX Interval                    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                 Required Min Echo RX Interval                 |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//     An optional Authentication Section MAY be present:
+//
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |    Authentication Data...     |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Simple Password Authentication Section Format
+//     ---------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |  Password...  |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format
+//     ----------------------------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                        Sequence Number                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                      Auth Key/Digest...                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format
+//     ------------------------------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                        Sequence Number                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                       Auth Key/Hash...                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//     From https://tools.ietf.org/rfc/rfc5880.txt
+const bfdMinimumRecordSizeInBytes int = 24
+
+// BFDVersion represents the version as decoded from the BFD control message
+type BFDVersion uint8
+
+// BFDDiagnostic represents diagnostic information about a BFD session
+type BFDDiagnostic uint8
+
+// constants that define BFDDiagnostic flags
+const (
+	BFDDiagnosticNone               BFDDiagnostic = 0 // No Diagnostic
+	BFDDiagnosticTimeExpired        BFDDiagnostic = 1 // Control Detection Time Expired
+	BFDDiagnosticEchoFailed         BFDDiagnostic = 2 // Echo Function Failed
+	BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down
+	BFDDiagnosticForwardPlaneReset  BFDDiagnostic = 4 // Forwarding Plane Reset
+	BFDDiagnosticPathDown           BFDDiagnostic = 5 // Path Down
+	BFDDiagnosticConcatPathDown     BFDDiagnostic = 6 // Concatenated Path Down
+	BFDDiagnosticAdminDown          BFDDiagnostic = 7 // Administratively Down
+	BFDDiagnosticRevConcatPathDown  BFDDiagnostic = 8 // Reverse Concatenated Path Down
+)
+
+// String returns a string version of BFDDiagnostic
+func (bd BFDDiagnostic) String() string {
+	switch bd {
+	default:
+		return "Unknown"
+	case BFDDiagnosticNone:
+		return "None"
+	case BFDDiagnosticTimeExpired:
+		return "Control Detection Time Expired"
+	case BFDDiagnosticEchoFailed:
+		return "Echo Function Failed"
+	case BFDDiagnosticNeighborSignalDown:
+		return "Neighbor Signaled Session Down"
+	case BFDDiagnosticForwardPlaneReset:
+		return "Forwarding Plane Reset"
+	case BFDDiagnosticPathDown:
+		return "Path Down"
+	case BFDDiagnosticConcatPathDown:
+		return "Concatenated Path Down"
+	case BFDDiagnosticAdminDown:
+		return "Administratively Down"
+	case BFDDiagnosticRevConcatPathDown:
+		return "Reverse Concatenated Path Down"
+	}
+}
+
+// BFDState represents the state of a BFD session
+type BFDState uint8
+
+// constants that define BFDState
+const (
+	BFDStateAdminDown BFDState = 0
+	BFDStateDown      BFDState = 1
+	BFDStateInit      BFDState = 2
+	BFDStateUp        BFDState = 3
+)
+
+// String returns a string version of BFDState
+func (s BFDState) String() string {
+	switch s {
+	default:
+		return "Unknown"
+	case BFDStateAdminDown:
+		return "Admin Down"
+	case BFDStateDown:
+		return "Down"
+	case BFDStateInit:
+		return "Init"
+	case BFDStateUp:
+		return "Up"
+	}
+}
+
+// BFDDetectMultiplier is the value by which the negotiated transmit
+// interval is multiplied to obtain the Detection Time for the
+// receiving system in Asynchronous mode.
+type BFDDetectMultiplier uint8
+
+// BFDDiscriminator is a unique, nonzero discriminator value used
+// to demultiplex multiple BFD sessions between the same pair of systems.
+type BFDDiscriminator uint32
+
+// BFDTimeInterval represents a time interval in microseconds
+type BFDTimeInterval uint32
+
+// BFDAuthType represents the authentication used in the BFD session
+type BFDAuthType uint8
+
+// constants that define the BFDAuthType
+const (
+	BFDAuthTypeNone                BFDAuthType = 0 // No Auth
+	BFDAuthTypePassword            BFDAuthType = 1 // Simple Password
+	BFDAuthTypeKeyedMD5            BFDAuthType = 2 // Keyed MD5
+	BFDAuthTypeMeticulousKeyedMD5  BFDAuthType = 3 // Meticulous Keyed MD5
+	BFDAuthTypeKeyedSHA1           BFDAuthType = 4 // Keyed SHA1
+	BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1
+)
+
+// String returns a string version of BFDAuthType
+func (at BFDAuthType) String() string {
+	switch at {
+	default:
+		return "Unknown"
+	case BFDAuthTypeNone:
+		return "No Authentication"
+	case BFDAuthTypePassword:
+		return "Simple Password"
+	case BFDAuthTypeKeyedMD5:
+		return "Keyed MD5"
+	case BFDAuthTypeMeticulousKeyedMD5:
+		return "Meticulous Keyed MD5"
+	case BFDAuthTypeKeyedSHA1:
+		return "Keyed SHA1"
+	case BFDAuthTypeMeticulousKeyedSHA1:
+		return "Meticulous Keyed SHA1"
+	}
+}
+
+// BFDAuthKeyID represents the authentication key ID in use for
+// this packet.  This allows multiple keys to be active simultaneously.
+type BFDAuthKeyID uint8
+
+// BFDAuthSequenceNumber represents the sequence number for this packet.
+// For Keyed Authentication, this value is incremented occasionally.  For
+// Meticulous Keyed Authentication, this value is incremented for each
+// successive packet transmitted for a session.  This provides protection
+// against replay attacks.
+type BFDAuthSequenceNumber uint32
+
+// BFDAuthData represents the authentication key or digest
+type BFDAuthData []byte
+
+// BFDAuthHeader represents authentication data used in the BFD session
+type BFDAuthHeader struct {
+	AuthType       BFDAuthType
+	KeyID          BFDAuthKeyID
+	SequenceNumber BFDAuthSequenceNumber
+	Data           BFDAuthData
+}
+
+// Length returns the data length of the BFDAuthHeader based on the
+// authentication type
+func (h *BFDAuthHeader) Length() int {
+	switch h.AuthType {
+	case BFDAuthTypePassword:
+		return 3 + len(h.Data)
+	case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+		return 8 + len(h.Data)
+	case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+		return 8 + len(h.Data)
+	default:
+		return 0
+	}
+}
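+
+// For instance (an illustrative value, assuming a 20-byte SHA1 digest), a
+// Keyed SHA1 section yields Length() == 8+20 == 28, the Auth Len that
+// RFC 5880 prescribes for that authentication type:
+//
+//	h := &BFDAuthHeader{
+//		AuthType: BFDAuthTypeKeyedSHA1,
+//		Data:     make(BFDAuthData, 20), // placeholder for the SHA1 digest
+//	}
+//	_ = h.Length() // 28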
+
+// BFD represents a BFD control message packet whose payload contains
+// the control information required for a BFD session.
+//
+// References
+// ----------
+//
+// Wikipedia's BFD entry:
+//     https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection
+//     This is the best place to get an overview of BFD.
+//
+// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010)
+//     https://tools.ietf.org/html/rfc5880
+//     This is the original BFD specification.
+//
+// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010)
+//     https://tools.ietf.org/html/rfc5881
+//     Describes the use of the Bidirectional Forwarding Detection (BFD)
+//     protocol over IPv4 and IPv6 for single IP hops.
+type BFD struct {
+	BaseLayer // Stores the packet bytes and payload bytes.
+
+	Version                   BFDVersion          // Version of the BFD protocol.
+	Diagnostic                BFDDiagnostic       // Diagnostic code for last state change
+	State                     BFDState            // Current state
+	Poll                      bool                // Requesting verification
+	Final                     bool                // Responding to a received BFD Control packet that had the Poll (P) bit set.
+	ControlPlaneIndependent   bool                // BFD implementation does not share fate with its control plane
+	AuthPresent               bool                // Authentication Section is present and the session is to be authenticated
+	Demand                    bool                // Demand mode is active
+	Multipoint                bool                // For future point-to-multipoint extensions. Must always be zero
+	DetectMultiplier          BFDDetectMultiplier // Detection time multiplier
+	MyDiscriminator           BFDDiscriminator    // A unique, nonzero discriminator value
+	YourDiscriminator         BFDDiscriminator    // discriminator received from the remote system.
+	DesiredMinTxInterval      BFDTimeInterval     // Minimum interval, in microseconds,  the local system would like to use when transmitting BFD Control packets
+	RequiredMinRxInterval     BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting
+	RequiredMinEchoRxInterval BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting
+	AuthHeader                *BFDAuthHeader      // Authentication data, variable length.
+}
+
+// Length returns the data length of a BFD Control message which
+// changes based on the presence and type of authentication
+// contained in the message
+func (d *BFD) Length() int {
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length()
+	}
+
+	return bfdMinimumRecordSizeInBytes
+}
+
+// LayerType returns the layer type of the BFD object, which is LayerTypeBFD.
+func (d *BFD) LayerType() gopacket.LayerType {
+	return LayerTypeBFD
+}
+
+// decodeBFD analyses a byte slice and attempts to decode it as a BFD
+// control packet
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the BFD layer.
+func decodeBFD(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &BFD{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD
+// control packet.
+//
+// Upon success, it loads the BFD object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a BFD record, then return an error.
+	if len(data) < bfdMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("BFD packet too short")
+	}
+
+	pLen := uint8(data[3])
+	if len(data) != int(pLen) {
+		return errors.New("BFD packet length does not match")
+	}
+
+	// BFD type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level.
+	//    Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the BFD record.
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	d.Version = BFDVersion(((data[0] & 0xE0) >> 5))
+	d.Diagnostic = BFDDiagnostic(data[0] & 0x1F)
+	data = data[1:]
+
+	d.State = BFDState((data[0] & 0xC0) >> 6)
+	d.Poll = data[0]&0x20 != 0
+	d.Final = data[0]&0x10 != 0
+	d.ControlPlaneIndependent = data[0]&0x08 != 0
+	d.AuthPresent = data[0]&0x04 != 0
+	d.Demand = data[0]&0x02 != 0
+	d.Multipoint = data[0]&0x01 != 0
+	data = data[1:]
+
+	data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0])
+	data, _ = data[1:], uint8(data[0]) // Consume length
+
+	// The remaining fields can just be copied in big endian order.
+	data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+
+	if d.AuthPresent && (len(data) > 2) {
+		d.AuthHeader = &BFDAuthHeader{}
+		data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0])
+		data, _ = data[1:], uint8(data[0]) // Consume length
+		data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0])
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic))
+	h := uint8(0)
+	h |= (uint8(d.State) << 6)
+	h |= (uint8(bool2uint8(d.Poll)) << 5)
+	h |= (uint8(bool2uint8(d.Final)) << 4)
+	h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3)
+	h |= (uint8(bool2uint8(d.AuthPresent)) << 2)
+	h |= (uint8(bool2uint8(d.Demand)) << 1)
+	h |= uint8(bool2uint8(d.Multipoint))
+	data[1] = byte(h)
+	data[2] = byte(d.DetectMultiplier)
+	data[3] = byte(d.Length())
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator))
+	binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator))
+	binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval))
+	binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval))
+	binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval))
+
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		auth, err := b.AppendBytes(int(d.AuthHeader.Length()))
+		if err != nil {
+			return err
+		}
+
+		auth[0] = byte(d.AuthHeader.AuthType)
+		auth[1] = byte(d.AuthHeader.Length())
+		auth[2] = byte(d.AuthHeader.KeyID)
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			copy(auth[3:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		}
+	}
+
+	return nil
+}
+
+// CanDecode returns a set of layers that BFD objects can decode.
+// As BFD objects can only decode the BFD layer, we can return just that layer.
+// Apparently a single layer type implements LayerClass.
+func (d *BFD) CanDecode() gopacket.LayerClass {
+	return LayerTypeBFD
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (BFD) layer. As BFD packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *BFD) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns an empty byte slice as BFD packets do not carry a payload
+func (d *BFD) Payload() []byte {
+	return nil
+}
+
+// bool2uint8 converts a bool to uint8
+func bool2uint8(b bool) uint8 {
+	if b {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/google/gopacket/layers/cdp.go
new file mode 100644
index 0000000..d67203e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/cdp.go
@@ -0,0 +1,651 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+//   http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm
+//   https://code.google.com/p/ladvd/
+//   http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet.
+type CDPTLVType uint16
+
+// CDPTLVType values.
+const (
+	CDPTLVDevID              CDPTLVType = 0x0001
+	CDPTLVAddress            CDPTLVType = 0x0002
+	CDPTLVPortID             CDPTLVType = 0x0003
+	CDPTLVCapabilities       CDPTLVType = 0x0004
+	CDPTLVVersion            CDPTLVType = 0x0005
+	CDPTLVPlatform           CDPTLVType = 0x0006
+	CDPTLVIPPrefix           CDPTLVType = 0x0007
+	CDPTLVHello              CDPTLVType = 0x0008
+	CDPTLVVTPDomain          CDPTLVType = 0x0009
+	CDPTLVNativeVLAN         CDPTLVType = 0x000a
+	CDPTLVFullDuplex         CDPTLVType = 0x000b
+	CDPTLVVLANReply          CDPTLVType = 0x000e
+	CDPTLVVLANQuery          CDPTLVType = 0x000f
+	CDPTLVPower              CDPTLVType = 0x0010
+	CDPTLVMTU                CDPTLVType = 0x0011
+	CDPTLVExtendedTrust      CDPTLVType = 0x0012
+	CDPTLVUntrustedCOS       CDPTLVType = 0x0013
+	CDPTLVSysName            CDPTLVType = 0x0014
+	CDPTLVSysOID             CDPTLVType = 0x0015
+	CDPTLVMgmtAddresses      CDPTLVType = 0x0016
+	CDPTLVLocation           CDPTLVType = 0x0017
+	CDPTLVExternalPortID     CDPTLVType = 0x0018
+	CDPTLVPowerRequested     CDPTLVType = 0x0019
+	CDPTLVPowerAvailable     CDPTLVType = 0x001a
+	CDPTLVPortUnidirectional CDPTLVType = 0x001b
+	CDPTLVEnergyWise         CDPTLVType = 0x001d
+	CDPTLVSparePairPOE       CDPTLVType = 0x001f
+)
+
+// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer.
+type CiscoDiscoveryValue struct {
+	Type   CDPTLVType
+	Length uint16
+	Value  []byte
+}
+
+// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol.
+// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885
+type CiscoDiscovery struct {
+	BaseLayer
+	Version  byte
+	TTL      byte
+	Checksum uint16
+	Values   []CiscoDiscoveryValue
+}
+
+// CDPCapability is the set of capabilities advertised by a CDP device.
+type CDPCapability uint32
+
+// CDPCapability values.
+const (
+	CDPCapMaskRouter     CDPCapability = 0x0001
+	CDPCapMaskTBBridge   CDPCapability = 0x0002
+	CDPCapMaskSPBridge   CDPCapability = 0x0004
+	CDPCapMaskSwitch     CDPCapability = 0x0008
+	CDPCapMaskHost       CDPCapability = 0x0010
+	CDPCapMaskIGMPFilter CDPCapability = 0x0020
+	CDPCapMaskRepeater   CDPCapability = 0x0040
+	CDPCapMaskPhone      CDPCapability = 0x0080
+	CDPCapMaskRemote     CDPCapability = 0x0100
+)
+
+// CDPCapabilities represents the capabilities of a device
+type CDPCapabilities struct {
+	L3Router        bool
+	TBBridge        bool
+	SPBridge        bool
+	L2Switch        bool
+	IsHost          bool
+	IGMPFilter      bool
+	L1Repeater      bool
+	IsPhone         bool
+	RemotelyManaged bool
+}
+
+// CDP Power-over-Ethernet values.
+const (
+	CDPPoEFourWire  byte = 0x01
+	CDPPoEPDArch    byte = 0x02
+	CDPPoEPDRequest byte = 0x04
+	CDPPoEPSE       byte = 0x08
+)
+
+// CDPSparePairPoE provides information on PoE.
+type CDPSparePairPoE struct {
+	PSEFourWire  bool // Supported / Not supported
+	PDArchShared bool // Shared / Independent
+	PDRequestOn  bool // On / Off
+	PSEOn        bool // On / Off
+}
+
+// CDPVLANDialogue encapsulates a VLAN Query/Reply
+type CDPVLANDialogue struct {
+	ID   uint8
+	VLAN uint16
+}
+
+// CDPPowerDialogue encapsulates a Power Query/Reply
+type CDPPowerDialogue struct {
+	ID     uint16
+	MgmtID uint16
+	Values []uint32
+}
+
+// CDPLocation provides location information for a CDP device.
+type CDPLocation struct {
+	Type     uint8 // Undocumented
+	Location string
+}
+
+// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields)
+type CDPHello struct {
+	OUI              []byte
+	ProtocolID       uint16
+	ClusterMaster    net.IP
+	Unknown1         net.IP
+	Version          byte
+	SubVersion       byte
+	Status           byte
+	Unknown2         byte
+	ClusterCommander net.HardwareAddr
+	SwitchMAC        net.HardwareAddr
+	Unknown3         byte
+	ManagementVLAN   uint16
+}
+
+// CDPEnergyWiseSubtype is used within CDP to define TLV values.
+type CDPEnergyWiseSubtype uint32
+
+// CDPEnergyWiseSubtype values.
+const (
+	CDPEnergyWiseRole    CDPEnergyWiseSubtype = 0x00000007
+	CDPEnergyWiseDomain  CDPEnergyWiseSubtype = 0x00000008
+	CDPEnergyWiseName    CDPEnergyWiseSubtype = 0x00000009
+	CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017
+)
+
+// CDPEnergyWise is used by CDP to monitor and control power usage.
+type CDPEnergyWise struct {
+	EncryptedData  []byte
+	Unknown1       uint32
+	SequenceNumber uint32
+	ModelNumber    string
+	Unknown2       uint16
+	HardwareID     string
+	SerialNum      string
+	Unknown3       []byte
+	Role           string
+	Domain         string
+	Name           string
+	ReplyUnknown1  []byte
+	ReplyPort      []byte
+	ReplyAddress   []byte
+	ReplyUnknown2  []byte
+	ReplyUnknown3  []byte
+}
+
+// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues
+type CiscoDiscoveryInfo struct {
+	BaseLayer
+	CDPHello
+	DeviceID         string
+	Addresses        []net.IP
+	PortID           string
+	Capabilities     CDPCapabilities
+	Version          string
+	Platform         string
+	IPPrefixes       []net.IPNet
+	VTPDomain        string
+	NativeVLAN       uint16
+	FullDuplex       bool
+	VLANReply        CDPVLANDialogue
+	VLANQuery        CDPVLANDialogue
+	PowerConsumption uint16
+	MTU              uint32
+	ExtendedTrust    uint8
+	UntrustedCOS     uint8
+	SysName          string
+	SysOID           string
+	MgmtAddresses    []net.IP
+	Location         CDPLocation
+	PowerRequest     CDPPowerDialogue
+	PowerAvailable   CDPPowerDialogue
+	SparePairPoe     CDPSparePairPoE
+	EnergyWise       CDPEnergyWise
+	Unknown          []CiscoDiscoveryValue
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscovery.
+func (c *CiscoDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeCiscoDiscovery
+}
+
+func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	c := &CiscoDiscovery{
+		Version:  data[0],
+		TTL:      data[1],
+		Checksum: binary.BigEndian.Uint16(data[2:4]),
+	}
+	if c.Version != 1 && c.Version != 2 {
+		return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version)
+	}
+	var err error
+	c.Values, err = decodeCiscoDiscoveryTLVs(data[4:])
+	if err != nil {
+		return err
+	}
+	c.Contents = data[0:4]
+	c.Payload = data[4:]
+	p.AddLayer(c)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo))
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo.
+func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeCiscoDiscoveryInfo
+}
+
+func decodeCiscoDiscoveryTLVs(data []byte) (values []CiscoDiscoveryValue, err error) {
+	for len(data) > 0 {
+		val := CiscoDiscoveryValue{
+			Type:   CDPTLVType(binary.BigEndian.Uint16(data[:2])),
+			Length: binary.BigEndian.Uint16(data[2:4]),
+		}
+		if val.Length < 4 {
+			err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length)
+			break
+		}
+		val.Value = data[4:val.Length]
+		values = append(values, val)
+		data = data[val.Length:]
+	}
+	return
+}
+
+func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error {
+	var err error
+	info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}}
+	p.AddLayer(info)
+	values, err := decodeCiscoDiscoveryTLVs(data)
+	if err != nil { // Unlikely, as parent decode will fail, but better safe...
+		return err
+	}
+	for _, val := range values {
+		switch val.Type {
+		case CDPTLVDevID:
+			info.DeviceID = string(val.Value)
+		case CDPTLVAddress:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.Addresses, err = decodeAddresses(val.Value)
+			if err != nil {
+				return err
+			}
+		case CDPTLVPortID:
+			info.PortID = string(val.Value)
+		case CDPTLVCapabilities:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4]))
+			info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0)
+			info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0)
+			info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0)
+			info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0)
+			info.Capabilities.IsHost = (val&CDPCapMaskHost > 0)
+			info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0)
+			info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0)
+			info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0)
+			info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0)
+		case CDPTLVVersion:
+			info.Version = string(val.Value)
+		case CDPTLVPlatform:
+			info.Platform = string(val.Value)
+		case CDPTLVIPPrefix:
+			v := val.Value
+			l := len(v)
+			if l%5 == 0 && l >= 5 {
+				for len(v) > 0 {
+					_, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4]))
+					info.IPPrefixes = append(info.IPPrefixes, *ipnet)
+					v = v[5:]
+				}
+			} else {
+				return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value))
+			}
+		case CDPTLVHello:
+			if err = checkCDPTLVLen(val, 32); err != nil {
+				return err
+			}
+			v := val.Value
+			info.CDPHello.OUI = v[0:3]
+			info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5])
+			info.CDPHello.ClusterMaster = v[5:9]
+			info.CDPHello.Unknown1 = v[9:13]
+			info.CDPHello.Version = v[13]
+			info.CDPHello.SubVersion = v[14]
+			info.CDPHello.Status = v[15]
+			info.CDPHello.Unknown2 = v[16]
+			info.CDPHello.ClusterCommander = v[17:23]
+			info.CDPHello.SwitchMAC = v[23:29]
+			info.CDPHello.Unknown3 = v[29]
+			info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32])
+		case CDPTLVVTPDomain:
+			info.VTPDomain = string(val.Value)
+		case CDPTLVNativeVLAN:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2])
+		case CDPTLVFullDuplex:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.FullDuplex = (val.Value[0] == 1)
+		case CDPTLVVLANReply:
+			if err = checkCDPTLVLen(val, 3); err != nil {
+				return err
+			}
+			info.VLANReply.ID = uint8(val.Value[0])
+			info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+		case CDPTLVVLANQuery:
+			if err = checkCDPTLVLen(val, 3); err != nil {
+				return err
+			}
+			info.VLANQuery.ID = uint8(val.Value[0])
+			info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+		case CDPTLVPower:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2])
+		case CDPTLVMTU:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.MTU = binary.BigEndian.Uint32(val.Value[0:4])
+		case CDPTLVExtendedTrust:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.ExtendedTrust = uint8(val.Value[0])
+		case CDPTLVUntrustedCOS:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.UntrustedCOS = uint8(val.Value[0])
+		case CDPTLVSysName:
+			info.SysName = string(val.Value)
+		case CDPTLVSysOID:
+			info.SysOID = string(val.Value)
+		case CDPTLVMgmtAddresses:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.MgmtAddresses, err = decodeAddresses(val.Value)
+			if err != nil {
+				return err
+			}
+		case CDPTLVLocation:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.Location.Type = uint8(val.Value[0])
+			info.Location.Location = string(val.Value[1:])
+
+			//		case CDPTLVLExternalPortID:
+			//			Undocumented
+		case CDPTLVPowerRequested:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2])
+			info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+			for n := 4; n < len(val.Value); n += 4 {
+				info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+			}
+		case CDPTLVPowerAvailable:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2])
+			info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+			for n := 4; n < len(val.Value); n += 4 {
+				info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+			}
+			//		case CDPTLVPortUnidirectional
+			//			Undocumented
+		case CDPTLVEnergyWise:
+			if err = checkCDPTLVLen(val, 72); err != nil {
+				return err
+			}
+			info.EnergyWise.EncryptedData = val.Value[0:20]
+			info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24])
+			info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28])
+			info.EnergyWise.ModelNumber = string(val.Value[28:44])
+			info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46])
+			info.EnergyWise.HardwareID = string(val.Value[46:49])
+			info.EnergyWise.SerialNum = string(val.Value[49:60])
+			info.EnergyWise.Unknown3 = val.Value[60:68]
+			tlvLen := binary.BigEndian.Uint16(val.Value[68:70])
+			tlvNum := binary.BigEndian.Uint16(val.Value[70:72])
+			data := val.Value[72:]
+			if len(data) < int(tlvLen) {
+				return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data))
+			}
+			numSeen := 0
+			for len(data) > 8 {
+				numSeen++
+				if numSeen > int(tlvNum) { // Too many TLV's ?
+					return fmt.Errorf("Too many TLV's - wanted %d, saw %d", tlvNum, numSeen)
+				}
+				tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4]))
+				tLen := int(binary.BigEndian.Uint32(data[4:8]))
+				if tLen > len(data)-8 {
+					return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8)
+				}
+				data = data[8:]
+				switch tType {
+				case CDPEnergyWiseRole:
+					info.EnergyWise.Role = string(data[:])
+				case CDPEnergyWiseDomain:
+					info.EnergyWise.Domain = string(data[:])
+				case CDPEnergyWiseName:
+					info.EnergyWise.Name = string(data[:])
+				case CDPEnergyWiseReplyTo:
+					if len(data) >= 18 {
+						info.EnergyWise.ReplyUnknown1 = data[0:2]
+						info.EnergyWise.ReplyPort = data[2:4]
+						info.EnergyWise.ReplyAddress = data[4:8]
+						info.EnergyWise.ReplyUnknown2 = data[8:10]
+						info.EnergyWise.ReplyUnknown3 = data[10:14]
+					}
+				}
+				data = data[tLen:]
+			}
+		case CDPTLVSparePairPOE:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			v := val.Value[0]
+			info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0)
+			info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0)
+			info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0)
+			info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0)
+		default:
+			info.Unknown = append(info.Unknown, val)
+		}
+	}
+	return nil
+}
+
+// CDP Protocol Types
+const (
+	CDPProtocolTypeNLPID byte = 1
+	CDPProtocolType802_2 byte = 2
+)
+
+// CDPAddressType is used to define TLV values within CDP addresses.
+type CDPAddressType uint64
+
+// CDP Address types.
+const (
+	CDPAddressTypeCLNP      CDPAddressType = 0x81
+	CDPAddressTypeIPV4      CDPAddressType = 0xcc
+	CDPAddressTypeIPV6      CDPAddressType = 0xaaaa030000000800
+	CDPAddressTypeDECNET    CDPAddressType = 0xaaaa030000006003
+	CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b
+	CDPAddressTypeIPX       CDPAddressType = 0xaaaa030000008137
+	CDPAddressTypeVINES     CDPAddressType = 0xaaaa0300000080c4
+	CDPAddressTypeXNS       CDPAddressType = 0xaaaa030000000600
+	CDPAddressTypeAPOLLO    CDPAddressType = 0xaaaa030000008019
+)
+
+func decodeAddresses(v []byte) (addresses []net.IP, err error) {
+	numaddr := int(binary.BigEndian.Uint32(v[0:4]))
+	if numaddr < 1 {
+		return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr)
+	}
+	v = v[4:]
+	if len(v) < numaddr*8 {
+		return nil, fmt.Errorf("Invalid Address TLV length %d", len(v))
+	}
+	for i := 0; i < numaddr; i++ {
+		prottype := v[0]
+		if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type
+			return nil, fmt.Errorf("Invalid Address Protocol %d", prottype)
+		}
+		protlen := int(v[1])
+		if (prottype == CDPProtocolTypeNLPID && protlen != 1) ||
+			(prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length
+			return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen)
+		}
+		plen := make([]byte, 8)
+		copy(plen[8-protlen:], v[2:2+protlen])
+		protocol := CDPAddressType(binary.BigEndian.Uint64(plen))
+		v = v[2+protlen:]
+		addrlen := binary.BigEndian.Uint16(v[0:2])
+		ab := v[2 : 2+addrlen]
+		if protocol == CDPAddressTypeIPV4 && addrlen == 4 {
+			addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3]))
+		} else if protocol == CDPAddressTypeIPV6 && addrlen == 16 {
+			addresses = append(addresses, net.IP(ab))
+		} else {
+			// only handle IPV4 & IPV6 for now
+		}
+		v = v[2+addrlen:]
+		if len(v) < 8 {
+			break
+		}
+	}
+	return
+}
+
+func (t CDPTLVType) String() (s string) {
+	switch t {
+	case CDPTLVDevID:
+		s = "Device ID"
+	case CDPTLVAddress:
+		s = "Addresses"
+	case CDPTLVPortID:
+		s = "Port ID"
+	case CDPTLVCapabilities:
+		s = "Capabilities"
+	case CDPTLVVersion:
+		s = "Software Version"
+	case CDPTLVPlatform:
+		s = "Platform"
+	case CDPTLVIPPrefix:
+		s = "IP Prefix"
+	case CDPTLVHello:
+		s = "Protocol Hello"
+	case CDPTLVVTPDomain:
+		s = "VTP Management Domain"
+	case CDPTLVNativeVLAN:
+		s = "Native VLAN"
+	case CDPTLVFullDuplex:
+		s = "Full Duplex"
+	case CDPTLVVLANReply:
+		s = "VoIP VLAN Reply"
+	case CDPTLVVLANQuery:
+		s = "VLANQuery"
+	case CDPTLVPower:
+		s = "Power consumption"
+	case CDPTLVMTU:
+		s = "MTU"
+	case CDPTLVExtendedTrust:
+		s = "Extended Trust Bitmap"
+	case CDPTLVUntrustedCOS:
+		s = "Untrusted Port CoS"
+	case CDPTLVSysName:
+		s = "System Name"
+	case CDPTLVSysOID:
+		s = "System OID"
+	case CDPTLVMgmtAddresses:
+		s = "Management Addresses"
+	case CDPTLVLocation:
+		s = "Location"
+	case CDPTLVExternalPortID:
+		s = "External Port ID"
+	case CDPTLVPowerRequested:
+		s = "Power Requested"
+	case CDPTLVPowerAvailable:
+		s = "Power Available"
+	case CDPTLVPortUnidirectional:
+		s = "Port Unidirectional"
+	case CDPTLVEnergyWise:
+		s = "Energy Wise"
+	case CDPTLVSparePairPOE:
+		s = "Spare Pair POE"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (a CDPAddressType) String() (s string) {
+	switch a {
+	case CDPAddressTypeCLNP:
+		s = "Connectionless Network Protocol"
+	case CDPAddressTypeIPV4:
+		s = "IPv4"
+	case CDPAddressTypeIPV6:
+		s = "IPv6"
+	case CDPAddressTypeDECNET:
+		s = "DECnet Phase IV"
+	case CDPAddressTypeAPPLETALK:
+		s = "Apple Talk"
+	case CDPAddressTypeIPX:
+		s = "Novell IPX"
+	case CDPAddressTypeVINES:
+		s = "Banyan VINES"
+	case CDPAddressTypeXNS:
+		s = "Xerox Network Systems"
+	case CDPAddressTypeAPOLLO:
+		s = "Apollo"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t CDPEnergyWiseSubtype) String() (s string) {
+	switch t {
+	case CDPEnergyWiseRole:
+		s = "Role"
+	case CDPEnergyWiseDomain:
+		s = "Domain"
+	case CDPEnergyWiseName:
+		s = "Name"
+	case CDPEnergyWiseReplyTo:
+		s = "ReplyTo"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) {
+	if len(v.Value) < l {
+		err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value))
+	}
+	return
+}
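+
+// Illustrative usage sketch (a minimal example, assuming the companion
+// github.com/google/gopacket/pcap package is available and that
+// "capture.pcap" is a capture file containing CDP frames):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/google/gopacket"
+//		"github.com/google/gopacket/layers"
+//		"github.com/google/gopacket/pcap"
+//	)
+//
+//	func main() {
+//		handle, err := pcap.OpenOffline("capture.pcap")
+//		if err != nil {
+//			panic(err)
+//		}
+//		defer handle.Close()
+//
+//		src := gopacket.NewPacketSource(handle, handle.LinkType())
+//		for packet := range src.Packets() {
+//			// The decoded TLVs end up in a CiscoDiscoveryInfo layer.
+//			if layer := packet.Layer(layers.LayerTypeCiscoDiscoveryInfo); layer != nil {
+//				info := layer.(*layers.CiscoDiscoveryInfo)
+//				fmt.Println(info.DeviceID, info.PortID, info.Platform, info.Addresses)
+//			}
+//		}
+//	}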
diff --git a/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/google/gopacket/layers/ctp.go
new file mode 100644
index 0000000..8287584
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ctp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each
+// EthernetCTP layer.
+type EthernetCTPFunction uint16
+
+// EthernetCTPFunction values.
+const (
+	EthernetCTPFunctionReply       EthernetCTPFunction = 1
+	EthernetCTPFunctionForwardData EthernetCTPFunction = 2
+)
+
+// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html.
+// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more
+// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer.
+type EthernetCTP struct {
+	BaseLayer
+	SkipCount uint16
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTP.
+func (c *EthernetCTP) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTP
+}
+
+// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP.  See EthernetCTP's docs for more
+// details.
+type EthernetCTPForwardData struct {
+	BaseLayer
+	Function       EthernetCTPFunction
+	ForwardAddress []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPForwardData.
+func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTPForwardData
+}
+
+// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint.
+func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress)
+}
+
+// EthernetCTPReply is the Reply layer inside EthernetCTP.  See EthernetCTP's docs for more details.
+type EthernetCTPReply struct {
+	BaseLayer
+	Function      EthernetCTPFunction
+	ReceiptNumber uint16
+	Data          []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPReply.
+func (c *EthernetCTPReply) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTPReply
+}
+
+// Payload returns the EthernetCTP reply's Data bytes.
+func (c *EthernetCTPReply) Payload() []byte { return c.Data }
+
+func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error {
+	c := &EthernetCTP{
+		SkipCount: binary.LittleEndian.Uint16(data[:2]),
+		BaseLayer: BaseLayer{data[:2], data[2:]},
+	}
+	if c.SkipCount%2 != 0 {
+		return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount)
+	}
+	p.AddLayer(c)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+}
+
+// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP
+// layer type to decode next, then decodes based on that.
+func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error {
+	function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2]))
+	switch function {
+	case EthernetCTPFunctionReply:
+		reply := &EthernetCTPReply{
+			Function:      function,
+			ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]),
+			Data:          data[4:],
+			BaseLayer:     BaseLayer{data, nil},
+		}
+		p.AddLayer(reply)
+		p.SetApplicationLayer(reply)
+		return nil
+	case EthernetCTPFunctionForwardData:
+		forward := &EthernetCTPForwardData{
+			Function:       function,
+			ForwardAddress: data[2:8],
+			BaseLayer:      BaseLayer{data[:8], data[8:]},
+		}
+		p.AddLayer(forward)
+		return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+	}
+	return fmt.Errorf("Unknown EthernetCTP function type %v", function)
+}
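+
+// Illustrative decoding sketch (a minimal, hand-crafted packet; the byte
+// values below are assumptions chosen only to exercise the decoder):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/google/gopacket"
+//		"github.com/google/gopacket/layers"
+//	)
+//
+//	func main() {
+//		raw := []byte{
+//			0x00, 0x00, // SkipCount = 0 (little endian)
+//			0x01, 0x00, // Function = 1, EthernetCTPFunctionReply
+//			0x2a, 0x00, // ReceiptNumber = 42
+//			0xde, 0xad, // Reply data
+//		}
+//		packet := gopacket.NewPacket(raw, layers.LayerTypeEthernetCTP, gopacket.Default)
+//		if layer := packet.Layer(layers.LayerTypeEthernetCTPReply); layer != nil {
+//			reply := layer.(*layers.EthernetCTPReply)
+//			fmt.Println(reply.ReceiptNumber, reply.Data)
+//		}
+//	}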
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/google/gopacket/layers/dhcpv4.go
new file mode 100644
index 0000000..3bbd036
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv4.go
@@ -0,0 +1,585 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPOp represents a BOOTP operation
+type DHCPOp byte
+
+// bootp operations
+const (
+	DHCPOpRequest DHCPOp = 1
+	DHCPOpReply   DHCPOp = 2
+)
+
+// String returns a string version of a DHCPOp.
+func (o DHCPOp) String() string {
+	switch o {
+	case DHCPOpRequest:
+		return "Request"
+	case DHCPOpReply:
+		return "Reply"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMsgType represents a DHCP operation
+type DHCPMsgType byte
+
+// Constants that represent DHCP operations
+const (
+	DHCPMsgTypeUnspecified DHCPMsgType = iota
+	DHCPMsgTypeDiscover
+	DHCPMsgTypeOffer
+	DHCPMsgTypeRequest
+	DHCPMsgTypeDecline
+	DHCPMsgTypeAck
+	DHCPMsgTypeNak
+	DHCPMsgTypeRelease
+	DHCPMsgTypeInform
+)
+
+// String returns a string version of a DHCPMsgType.
+func (o DHCPMsgType) String() string {
+	switch o {
+	case DHCPMsgTypeUnspecified:
+		return "Unspecified"
+	case DHCPMsgTypeDiscover:
+		return "Discover"
+	case DHCPMsgTypeOffer:
+		return "Offer"
+	case DHCPMsgTypeRequest:
+		return "Request"
+	case DHCPMsgTypeDecline:
+		return "Decline"
+	case DHCPMsgTypeAck:
+		return "Ack"
+	case DHCPMsgTypeNak:
+		return "Nak"
+	case DHCPMsgTypeRelease:
+		return "Release"
+	case DHCPMsgTypeInform:
+		return "Inform"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMagic is the RFC 2131 "magic cookie" for DHCP.
+var DHCPMagic uint32 = 0x63825363
+
+// DHCPv4 contains data for a single DHCP packet.
+type DHCPv4 struct {
+	BaseLayer
+	Operation    DHCPOp
+	HardwareType LinkType
+	HardwareLen  uint8
+	HardwareOpts uint8
+	Xid          uint32
+	Secs         uint16
+	Flags        uint16
+	ClientIP     net.IP
+	YourClientIP net.IP
+	NextServerIP net.IP
+	RelayAgentIP net.IP
+	ClientHWAddr net.HardwareAddr
+	ServerName   []byte
+	File         []byte
+	Options      DHCPOptions
+}
+
+// DHCPOptions is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPOptions []DHCPOption
+
+// String returns a string version of the options list.
+func (o DHCPOptions) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteByte('[')
+	for i, opt := range o {
+		buf.WriteString(opt.String())
+		if i+1 != len(o) {
+			buf.WriteString(", ")
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv4
+func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	d.Options = d.Options[:0]
+	d.Operation = DHCPOp(data[0])
+	d.HardwareType = LinkType(data[1])
+	d.HardwareLen = data[2]
+	d.HardwareOpts = data[3]
+	d.Xid = binary.BigEndian.Uint32(data[4:8])
+	d.Secs = binary.BigEndian.Uint16(data[8:10])
+	d.Flags = binary.BigEndian.Uint16(data[10:12])
+	d.ClientIP = net.IP(data[12:16])
+	d.YourClientIP = net.IP(data[16:20])
+	d.NextServerIP = net.IP(data[20:24])
+	d.RelayAgentIP = net.IP(data[24:28])
+	d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen])
+	d.ServerName = data[44:108]
+	d.File = data[108:236]
+	if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic {
+		return InvalidMagicCookie
+	}
+
+	if len(data) <= 240 {
+		// A DHCP packet may contain no options at all.
+		return nil
+	}
+
+	options := data[240:]
+
+	stop := len(options)
+	start := 0
+	for start < stop {
+		o := DHCPOption{}
+		if err := o.decode(options[start:]); err != nil {
+			return err
+		}
+		if o.Type == DHCPOptEnd {
+			break
+		}
+		d.Options = append(d.Options, o)
+		// Check if the option is a single byte pad
+		if o.Type == DHCPOptPad {
+			start++
+		} else {
+			start += int(o.Length) + 2
+		}
+	}
+	return nil
+}
+
+// Len returns the length of a DHCPv4 packet.
+func (d *DHCPv4) Len() uint16 {
+	n := uint16(240)
+	for _, o := range d.Options {
+		if o.Type == DHCPOptPad {
+			n++
+		} else {
+			n += uint16(o.Length) + 2
+		}
+	}
+	n++ // for opt end
+	return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen := int(d.Len())
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	data[0] = byte(d.Operation)
+	data[1] = byte(d.HardwareType)
+	if opts.FixLengths {
+		d.HardwareLen = uint8(len(d.ClientHWAddr))
+	}
+	data[2] = d.HardwareLen
+	data[3] = d.HardwareOpts
+	binary.BigEndian.PutUint32(data[4:8], d.Xid)
+	binary.BigEndian.PutUint16(data[8:10], d.Secs)
+	binary.BigEndian.PutUint16(data[10:12], d.Flags)
+	copy(data[12:16], d.ClientIP.To4())
+	copy(data[16:20], d.YourClientIP.To4())
+	copy(data[20:24], d.NextServerIP.To4())
+	copy(data[24:28], d.RelayAgentIP.To4())
+	copy(data[28:44], d.ClientHWAddr)
+	copy(data[44:108], d.ServerName)
+	copy(data[108:236], d.File)
+	binary.BigEndian.PutUint32(data[236:240], DHCPMagic)
+
+	if len(d.Options) > 0 {
+		offset := 240
+		for _, o := range d.Options {
+			if err := o.encode(data[offset:]); err != nil {
+				return err
+			}
+			// A pad option is only a single byte
+			if o.Type == DHCPOptPad {
+				offset++
+			} else {
+				offset += 2 + len(o.Data)
+			}
+		}
+		optend := NewDHCPOption(DHCPOptEnd, nil)
+		if err := optend.encode(data[offset:]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeDHCPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv4) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error {
+	dhcp := &DHCPv4{}
+	err := dhcp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(dhcp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// DHCPOpt represents a DHCP option or parameter from RFC-2132
+type DHCPOpt byte
+
+// Constants for the DHCPOpt options.
+const (
+	DHCPOptPad                   DHCPOpt = 0
+	DHCPOptSubnetMask            DHCPOpt = 1   // 4, net.IP
+	DHCPOptTimeOffset            DHCPOpt = 2   // 4, int32 (signed seconds from UTC)
+	DHCPOptRouter                DHCPOpt = 3   // n*4, [n]net.IP
+	DHCPOptTimeServer            DHCPOpt = 4   // n*4, [n]net.IP
+	DHCPOptNameServer            DHCPOpt = 5   // n*4, [n]net.IP
+	DHCPOptDNS                   DHCPOpt = 6   // n*4, [n]net.IP
+	DHCPOptLogServer             DHCPOpt = 7   // n*4, [n]net.IP
+	DHCPOptCookieServer          DHCPOpt = 8   // n*4, [n]net.IP
+	DHCPOptLPRServer             DHCPOpt = 9   // n*4, [n]net.IP
+	DHCPOptImpressServer         DHCPOpt = 10  // n*4, [n]net.IP
+	DHCPOptResLocServer          DHCPOpt = 11  // n*4, [n]net.IP
+	DHCPOptHostname              DHCPOpt = 12  // n, string
+	DHCPOptBootfileSize          DHCPOpt = 13  // 2, uint16
+	DHCPOptMeritDumpFile         DHCPOpt = 14  // >1, string
+	DHCPOptDomainName            DHCPOpt = 15  // n, string
+	DHCPOptSwapServer            DHCPOpt = 16  // n*4, [n]net.IP
+	DHCPOptRootPath              DHCPOpt = 17  // n, string
+	DHCPOptExtensionsPath        DHCPOpt = 18  // n, string
+	DHCPOptIPForwarding          DHCPOpt = 19  // 1, bool
+	DHCPOptSourceRouting         DHCPOpt = 20  // 1, bool
+	DHCPOptPolicyFilter          DHCPOpt = 21  // 8*n, [n]{net.IP/net.IP}
+	DHCPOptDatagramMTU           DHCPOpt = 22  // 2, uint16
+	DHCPOptDefaultTTL            DHCPOpt = 23  // 1, byte
+	DHCPOptPathMTUAgingTimeout   DHCPOpt = 24  // 4, uint32
+	DHCPOptPathPlateuTableOption DHCPOpt = 25  // 2*n, []uint16
+	DHCPOptInterfaceMTU          DHCPOpt = 26  // 2, uint16
+	DHCPOptAllSubsLocal          DHCPOpt = 27  // 1, bool
+	DHCPOptBroadcastAddr         DHCPOpt = 28  // 4, net.IP
+	DHCPOptMaskDiscovery         DHCPOpt = 29  // 1, bool
+	DHCPOptMaskSupplier          DHCPOpt = 30  // 1, bool
+	DHCPOptRouterDiscovery       DHCPOpt = 31  // 1, bool
+	DHCPOptSolicitAddr           DHCPOpt = 32  // 4, net.IP
+	DHCPOptStaticRoute           DHCPOpt = 33  // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask
+	DHCPOptARPTrailers           DHCPOpt = 34  // 1, bool
+	DHCPOptARPTimeout            DHCPOpt = 35  // 4, uint32
+	DHCPOptEthernetEncap         DHCPOpt = 36  // 1, bool
+	DHCPOptTCPTTL                DHCPOpt = 37  // 1, byte
+	DHCPOptTCPKeepAliveInt       DHCPOpt = 38  // 4, uint32
+	DHCPOptTCPKeepAliveGarbage   DHCPOpt = 39  // 1, bool
+	DHCPOptNISDomain             DHCPOpt = 40  // n, string
+	DHCPOptNISServers            DHCPOpt = 41  // 4*n,  [n]net.IP
+	DHCPOptNTPServers            DHCPOpt = 42  // 4*n, [n]net.IP
+	DHCPOptVendorOption          DHCPOpt = 43  // n, [n]byte // may be encapsulated.
+	DHCPOptNetBIOSTCPNS          DHCPOpt = 44  // 4*n, [n]net.IP
+	DHCPOptNetBIOSTCPDDS         DHCPOpt = 45  // 4*n, [n]net.IP
+	DHCPOptNETBIOSTCPNodeType    DHCPOpt = 46  // 1, magic byte
+	DHCPOptNetBIOSTCPScope       DHCPOpt = 47  // n, string
+	DHCPOptXFontServer           DHCPOpt = 48  // n, string
+	DHCPOptXDisplayManager       DHCPOpt = 49  // n, string
+	DHCPOptRequestIP             DHCPOpt = 50  // 4, net.IP
+	DHCPOptLeaseTime             DHCPOpt = 51  // 4, uint32
+	DHCPOptExtOptions            DHCPOpt = 52  // 1, 1/2/3
+	DHCPOptMessageType           DHCPOpt = 53  // 1, 1-7
+	DHCPOptServerID              DHCPOpt = 54  // 4, net.IP
+	DHCPOptParamsRequest         DHCPOpt = 55  // n, []byte
+	DHCPOptMessage               DHCPOpt = 56  // n, 3
+	DHCPOptMaxMessageSize        DHCPOpt = 57  // 2, uint16
+	DHCPOptT1                    DHCPOpt = 58  // 4, uint32
+	DHCPOptT2                    DHCPOpt = 59  // 4, uint32
+	DHCPOptClassID               DHCPOpt = 60  // n, []byte
+	DHCPOptClientID              DHCPOpt = 61  // n >=  2, []byte
+	DHCPOptDomainSearch          DHCPOpt = 119 // n, string
+	DHCPOptSIPServers            DHCPOpt = 120 // n, url
+	DHCPOptClasslessStaticRoute  DHCPOpt = 121 //
+	DHCPOptEnd                   DHCPOpt = 255
+)
+
+// String returns a string version of a DHCPOpt.
+func (o DHCPOpt) String() string {
+	switch o {
+	case DHCPOptPad:
+		return "(padding)"
+	case DHCPOptSubnetMask:
+		return "SubnetMask"
+	case DHCPOptTimeOffset:
+		return "TimeOffset"
+	case DHCPOptRouter:
+		return "Router"
+	case DHCPOptTimeServer:
+		return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP
+	case DHCPOptNameServer:
+		return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS
+	case DHCPOptDNS:
+		return "DNS"
+	case DHCPOptLogServer:
+		return "mitLCS" // MIT LCS server protocol yada yada w. Syslog
+	case DHCPOptCookieServer:
+		return "CookieServer"
+	case DHCPOptLPRServer:
+		return "LPRServer"
+	case DHCPOptImpressServer:
+		return "ImpressServer"
+	case DHCPOptResLocServer:
+		return "ResourceLocationServer"
+	case DHCPOptHostname:
+		return "Hostname"
+	case DHCPOptBootfileSize:
+		return "BootfileSize"
+	case DHCPOptMeritDumpFile:
+		return "MeritDumpFile"
+	case DHCPOptDomainName:
+		return "DomainName"
+	case DHCPOptSwapServer:
+		return "SwapServer"
+	case DHCPOptRootPath:
+		return "RootPath"
+	case DHCPOptExtensionsPath:
+		return "ExtensionsPath"
+	case DHCPOptIPForwarding:
+		return "IPForwarding"
+	case DHCPOptSourceRouting:
+		return "SourceRouting"
+	case DHCPOptPolicyFilter:
+		return "PolicyFilter"
+	case DHCPOptDatagramMTU:
+		return "DatagramMTU"
+	case DHCPOptDefaultTTL:
+		return "DefaultTTL"
+	case DHCPOptPathMTUAgingTimeout:
+		return "PathMTUAgingTimeout"
+	case DHCPOptPathPlateuTableOption:
+		return "PathPlateuTableOption"
+	case DHCPOptInterfaceMTU:
+		return "InterfaceMTU"
+	case DHCPOptAllSubsLocal:
+		return "AllSubsLocal"
+	case DHCPOptBroadcastAddr:
+		return "BroadcastAddress"
+	case DHCPOptMaskDiscovery:
+		return "MaskDiscovery"
+	case DHCPOptMaskSupplier:
+		return "MaskSupplier"
+	case DHCPOptRouterDiscovery:
+		return "RouterDiscovery"
+	case DHCPOptSolicitAddr:
+		return "SolicitAddr"
+	case DHCPOptStaticRoute:
+		return "StaticRoute"
+	case DHCPOptARPTrailers:
+		return "ARPTrailers"
+	case DHCPOptARPTimeout:
+		return "ARPTimeout"
+	case DHCPOptEthernetEncap:
+		return "EthernetEncap"
+	case DHCPOptTCPTTL:
+		return "TCPTTL"
+	case DHCPOptTCPKeepAliveInt:
+		return "TCPKeepAliveInt"
+	case DHCPOptTCPKeepAliveGarbage:
+		return "TCPKeepAliveGarbage"
+	case DHCPOptNISDomain:
+		return "NISDomain"
+	case DHCPOptNISServers:
+		return "NISServers"
+	case DHCPOptNTPServers:
+		return "NTPServers"
+	case DHCPOptVendorOption:
+		return "VendorOption"
+	case DHCPOptNetBIOSTCPNS:
+		return "NetBIOSOverTCPNS"
+	case DHCPOptNetBIOSTCPDDS:
+		return "NetBiosOverTCPDDS"
+	case DHCPOptNETBIOSTCPNodeType:
+		return "NetBIOSOverTCPNodeType"
+	case DHCPOptNetBIOSTCPScope:
+		return "NetBIOSOverTCPScope"
+	case DHCPOptXFontServer:
+		return "XFontServer"
+	case DHCPOptXDisplayManager:
+		return "XDisplayManager"
+	case DHCPOptEnd:
+		return "(end)"
+	case DHCPOptSIPServers:
+		return "SipServers"
+	case DHCPOptRequestIP:
+		return "RequestIP"
+	case DHCPOptLeaseTime:
+		return "LeaseTime"
+	case DHCPOptExtOptions:
+		return "ExtOpts"
+	case DHCPOptMessageType:
+		return "MessageType"
+	case DHCPOptServerID:
+		return "ServerID"
+	case DHCPOptParamsRequest:
+		return "ParamsRequest"
+	case DHCPOptMessage:
+		return "Message"
+	case DHCPOptMaxMessageSize:
+		return "MaxDHCPSize"
+	case DHCPOptT1:
+		return "Timer1"
+	case DHCPOptT2:
+		return "Timer2"
+	case DHCPOptClassID:
+		return "ClassID"
+	case DHCPOptClientID:
+		return "ClientID"
+	case DHCPOptDomainSearch:
+		return "DomainSearch"
+	case DHCPOptClasslessStaticRoute:
+		return "ClasslessStaticRoute"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPOption represents a DHCP option.
+type DHCPOption struct {
+	Type   DHCPOpt
+	Length uint8
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPOption) String() string {
+	switch o.Type {
+
+	case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath,
+		DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer,
+		DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string
+		return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data))
+
+	case DHCPOptMessageType:
+		if len(o.Data) != 1 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0]))
+
+	case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr,
+		DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP
+		if len(o.Data) < 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data))
+
+	case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout,
+		DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32
+		if len(o.Data) != 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%d)", o.Type,
+			uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3]))
+
+	case DHCPOptParamsRequest:
+		buf := &bytes.Buffer{}
+		buf.WriteString(fmt.Sprintf("Option(%s:", o.Type))
+		for i, v := range o.Data {
+			buf.WriteString(DHCPOpt(v).String())
+			if i+1 != len(o.Data) {
+				buf.WriteByte(',')
+			}
+		}
+		buf.WriteString(")")
+		return buf.String()
+
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data)
+	}
+}
+
+// NewDHCPOption constructs a new DHCPOption with a given type and data.
+func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption {
+	o := DHCPOption{Type: t}
+	if data != nil {
+		o.Data = data
+		o.Length = uint8(len(data))
+	}
+	return o
+}
+
+func (o *DHCPOption) encode(b []byte) error {
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		b[0] = byte(o.Type)
+	default:
+		b[0] = byte(o.Type)
+		b[1] = o.Length
+		copy(b[2:], o.Data)
+	}
+	return nil
+}
+
+func (o *DHCPOption) decode(data []byte) error {
+	if len(data) < 1 {
+		// Pad/End have a length of 1
+		return DecOptionNotEnoughData
+	}
+	o.Type = DHCPOpt(data[0])
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		o.Data = nil
+	default:
+		if len(data) < 2 {
+			return DecOptionNotEnoughData
+		}
+		o.Length = data[1]
+		if int(o.Length) > len(data[2:]) {
+			return DecOptionMalformed
+		}
+		o.Data = data[2 : 2+int(o.Length)]
+	}
+	return nil
+}
+
+// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts.
+type DHCPv4Error string
+
+// DHCPv4Error implements error interface.
+func (d DHCPv4Error) Error() string {
+	return string(d)
+}
+
+const (
+	// DecOptionNotEnoughData is returned when there is not enough data during an option's decode process
+	DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode")
+	// DecOptionMalformed is returned when the option is malformed
+	DecOptionMalformed = DHCPv4Error("Option is malformed")
+	// InvalidMagicCookie is returned when Magic cookie is missing into BOOTP header
+	InvalidMagicCookie = DHCPv4Error("Bad DHCP header")
+)
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/google/gopacket/layers/dhcpv6.go
new file mode 100644
index 0000000..052b394
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6.go
@@ -0,0 +1,341 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPv6MsgType represents a DHCPv6 operation
+type DHCPv6MsgType byte
+
+// Constants that represent DHCP operations
+const (
+	DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota
+	DHCPv6MsgTypeSolicit
+	DHCPv6MsgTypeAdverstise
+	DHCPv6MsgTypeRequest
+	DHCPv6MsgTypeConfirm
+	DHCPv6MsgTypeRenew
+	DHCPv6MsgTypeRebind
+	DHCPv6MsgTypeReply
+	DHCPv6MsgTypeRelease
+	DHCPv6MsgTypeDecline
+	DHCPv6MsgTypeReconfigure
+	DHCPv6MsgTypeInformationRequest
+	DHCPv6MsgTypeRelayForward
+	DHCPv6MsgTypeRelayReply
+)
+
+// String returns a string version of a DHCPv6MsgType.
+func (o DHCPv6MsgType) String() string {
+	switch o {
+	case DHCPv6MsgTypeUnspecified:
+		return "Unspecified"
+	case DHCPv6MsgTypeSolicit:
+		return "Solicit"
+	case DHCPv6MsgTypeAdverstise:
+		return "Advertise"
+	case DHCPv6MsgTypeRequest:
+		return "Request"
+	case DHCPv6MsgTypeConfirm:
+		return "Confirm"
+	case DHCPv6MsgTypeRenew:
+		return "Renew"
+	case DHCPv6MsgTypeRebind:
+		return "Rebind"
+	case DHCPv6MsgTypeReply:
+		return "Reply"
+	case DHCPv6MsgTypeRelease:
+		return "Release"
+	case DHCPv6MsgTypeDecline:
+		return "Decline"
+	case DHCPv6MsgTypeReconfigure:
+		return "Reconfigure"
+	case DHCPv6MsgTypeInformationRequest:
+		return "InformationRequest"
+	case DHCPv6MsgTypeRelayForward:
+		return "RelayForward"
+	case DHCPv6MsgTypeRelayReply:
+		return "RelayReply"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6 contains data for a single DHCP packet.
+type DHCPv6 struct {
+	BaseLayer
+	MsgType       DHCPv6MsgType
+	HopCount      uint8
+	LinkAddr      net.IP
+	PeerAddr      net.IP
+	TransactionID []byte
+	Options       DHCPv6Options
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv6
+func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	d.BaseLayer = BaseLayer{Contents: data}
+	d.Options = d.Options[:0]
+	d.MsgType = DHCPv6MsgType(data[0])
+
+	offset := 0
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		d.HopCount = data[1]
+		d.LinkAddr = net.IP(data[2:18])
+		d.PeerAddr = net.IP(data[18:34])
+		offset = 34
+	} else {
+		d.TransactionID = data[1:4]
+		offset = 4
+	}
+
+	stop := len(data)
+	for offset < stop {
+		o := DHCPv6Option{}
+		if err := o.decode(data[offset:]); err != nil {
+			return err
+		}
+		d.Options = append(d.Options, o)
+		offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return nil
+}
+
+// Len returns the length of a DHCPv6 packet.
+func (d *DHCPv6) Len() int {
+	n := 1
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		n += 33
+	} else {
+		n += 3
+	}
+
+	for _, o := range d.Options {
+		n += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen := int(d.Len())
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	offset := 0
+	data[0] = byte(d.MsgType)
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		data[1] = byte(d.HopCount)
+		copy(data[2:18], d.LinkAddr.To16())
+		copy(data[18:34], d.PeerAddr.To16())
+		offset = 34
+	} else {
+		copy(data[1:4], d.TransactionID)
+		offset = 4
+	}
+
+	if len(d.Options) > 0 {
+		for _, o := range d.Options {
+			if err := o.encode(data[offset:], opts); err != nil {
+				return err
+			}
+			offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+		}
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeDHCPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv6) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error {
+	dhcp := &DHCPv6{}
+	err := dhcp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(dhcp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
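+
+// exampleDecodeDHCPv6Solicit is an illustrative sketch, not part of the
+// upstream gopacket sources: it hand-builds a minimal Solicit message
+// (msg-type, 3-byte transaction ID, one ElapsedTime option) and decodes it
+// with DecodeFromBytes. The function name and the sample bytes are
+// hypothetical.
+func exampleDecodeDHCPv6Solicit() (*DHCPv6, error) {
+	raw := []byte{
+		byte(DHCPv6MsgTypeSolicit), // msg-type
+		0x00, 0x01, 0x02, // transaction ID
+		0x00, 0x08, 0x00, 0x02, // option 8 (ElapsedTime), length 2
+		0x00, 0x00, // elapsed time value
+	}
+	d := &DHCPv6{}
+	if err := d.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+		return nil, err
+	}
+	return d, nil
+}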
+
+// DHCPv6StatusCode represents a DHCP status code - RFC-3315
+type DHCPv6StatusCode uint16
+
+// Constants for the DHCPv6StatusCode.
+const (
+	DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota
+	DHCPv6StatusCodeUnspecFail
+	DHCPv6StatusCodeNoAddrsAvail
+	DHCPv6StatusCodeNoBinding
+	DHCPv6StatusCodeNotOnLink
+	DHCPv6StatusCodeUseMulticast
+)
+
+// String returns a string version of a DHCPv6StatusCode.
+func (o DHCPv6StatusCode) String() string {
+	switch o {
+	case DHCPv6StatusCodeSuccess:
+		return "Success"
+	case DHCPv6StatusCodeUnspecFail:
+		return "UnspecifiedFailure"
+	case DHCPv6StatusCodeNoAddrsAvail:
+		return "NoAddressAvailable"
+	case DHCPv6StatusCodeNoBinding:
+		return "NoBinding"
+	case DHCPv6StatusCodeNotOnLink:
+		return "NotOnLink"
+	case DHCPv6StatusCodeUseMulticast:
+		return "UseMulticast"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUIDType represents a DHCP DUID - RFC-3315
+type DHCPv6DUIDType uint16
+
+// Constants for the DHCPv6DUIDType.
+const (
+	DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1
+	DHCPv6DUIDTypeEN
+	DHCPv6DUIDTypeLL
+)
+
+// String returns a string version of a DHCPv6DUIDType.
+func (o DHCPv6DUIDType) String() string {
+	switch o {
+	case DHCPv6DUIDTypeLLT:
+		return "LLT"
+	case DHCPv6DUIDTypeEN:
+		return "EN"
+	case DHCPv6DUIDTypeLL:
+		return "LL"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUID represents the DHCP Unique Identifier (DUID) defined in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19)
+type DHCPv6DUID struct {
+	Type DHCPv6DUIDType
+	// LLT, LL
+	HardwareType []byte
+	// EN
+	EnterpriseNumber []byte
+	// LLT
+	Time []byte
+	// LLT, LL
+	LinkLayerAddress net.HardwareAddr
+	// EN
+	Identifier []byte
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6DUID
+func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error {
+	if len(data) < 2 {
+		return errors.New("not enough bytes to decode DHCPv6 DUID")
+	}
+
+	d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2]))
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		d.HardwareType = data[2:4]
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		d.Time = data[4:8]
+		d.LinkLayerAddress = net.HardwareAddr(data[8:])
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		d.EnterpriseNumber = data[2:6]
+		d.Identifier = data[6:]
+	} else { // DHCPv6DUIDTypeLL
+		d.LinkLayerAddress = net.HardwareAddr(data[4:])
+	}
+
+	return nil
+}
+
+// Encode encodes the DHCPv6DUID in a slice of bytes
+func (d *DHCPv6DUID) Encode() []byte {
+	length := d.Len()
+	data := make([]byte, length)
+	binary.BigEndian.PutUint16(data[0:2], uint16(d.Type))
+
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		copy(data[2:4], d.HardwareType)
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		copy(data[4:8], d.Time)
+		copy(data[8:], d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		copy(data[2:6], d.EnterpriseNumber)
+		copy(data[6:], d.Identifier)
+	} else {
+		copy(data[4:], d.LinkLayerAddress)
+	}
+
+	return data
+}
+
+// Len returns the length of the DHCPv6DUID, respecting the type
+func (d *DHCPv6DUID) Len() int {
+	length := 2 // d.Type
+	if d.Type == DHCPv6DUIDTypeLLT {
+		length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier)
+	} else { // LL
+		length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress)
+	}
+
+	return length
+}
+
+func (d *DHCPv6DUID) String() string {
+	duid := "Type: " + d.Type.String() + ", "
+	if d.Type == DHCPv6DUIDTypeLLT {
+		duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", d.HardwareType, d.Time, d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier)
+	} else { // DHCPv6DUIDTypeLL
+		duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress)
+	}
+	return duid
+}
+
+func decodeDHCPv6DUID(data []byte) (*DHCPv6DUID, error) {
+	duid := &DHCPv6DUID{}
+	err := duid.DecodeFromBytes(data)
+	if err != nil {
+		return nil, err
+	}
+	return duid, nil
+}
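+
+// exampleDUIDRoundTrip is an illustrative sketch, not part of the upstream
+// gopacket sources: it encodes a DUID-LL (type 3) for a hypothetical MAC
+// address and decodes it back, exercising the 2-byte type / 2-byte hardware
+// type / variable link-layer address layout handled above.
+func exampleDUIDRoundTrip() (*DHCPv6DUID, error) {
+	duid := &DHCPv6DUID{
+		Type:             DHCPv6DUIDTypeLL,
+		HardwareType:     []byte{0x00, 0x01}, // Ethernet
+		LinkLayerAddress: net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
+	}
+	return decodeDHCPv6DUID(duid.Encode())
+}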
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
new file mode 100644
index 0000000..0c05e35
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// DHCPv6Opt represents a DHCP option or parameter from RFC-3315
+type DHCPv6Opt uint16
+
+// Constants for the DHCPv6Opt options.
+const (
+	DHCPv6OptClientID           DHCPv6Opt = 1
+	DHCPv6OptServerID           DHCPv6Opt = 2
+	DHCPv6OptIANA               DHCPv6Opt = 3
+	DHCPv6OptIATA               DHCPv6Opt = 4
+	DHCPv6OptIAAddr             DHCPv6Opt = 5
+	DHCPv6OptOro                DHCPv6Opt = 6
+	DHCPv6OptPreference         DHCPv6Opt = 7
+	DHCPv6OptElapsedTime        DHCPv6Opt = 8
+	DHCPv6OptRelayMessage       DHCPv6Opt = 9
+	DHCPv6OptAuth               DHCPv6Opt = 11
+	DHCPv6OptUnicast            DHCPv6Opt = 12
+	DHCPv6OptStatusCode         DHCPv6Opt = 13
+	DHCPv6OptRapidCommit        DHCPv6Opt = 14
+	DHCPv6OptUserClass          DHCPv6Opt = 15
+	DHCPv6OptVendorClass        DHCPv6Opt = 16
+	DHCPv6OptVendorOpts         DHCPv6Opt = 17
+	DHCPv6OptInterfaceID        DHCPv6Opt = 18
+	DHCPv6OptReconfigureMessage DHCPv6Opt = 19
+	DHCPv6OptReconfigureAccept  DHCPv6Opt = 20
+
+	// RFC 3319 Session Initiation Protocol (SIP)
+	DHCPv6OptSIPServersDomainList  DHCPv6Opt = 21
+	DHCPv6OptSIPServersAddressList DHCPv6Opt = 22
+
+	// RFC 3646 DNS Configuration
+	DHCPv6OptDNSServers DHCPv6Opt = 23
+	DHCPv6OptDomainList DHCPv6Opt = 24
+
+	// RFC 3633 Prefix Delegation
+	DHCPv6OptIAPD     DHCPv6Opt = 25
+	DHCPv6OptIAPrefix DHCPv6Opt = 26
+
+	// RFC 3898 Network Information Service (NIS)
+	DHCPv6OptNISServers     DHCPv6Opt = 27
+	DHCPv6OptNISPServers    DHCPv6Opt = 28
+	DHCPv6OptNISDomainName  DHCPv6Opt = 29
+	DHCPv6OptNISPDomainName DHCPv6Opt = 30
+
+	// RFC 4075 Simple Network Time Protocol (SNTP)
+	DHCPv6OptSNTPServers DHCPv6Opt = 31
+
+	// RFC 4242 Information Refresh Time Option
+	DHCPv6OptInformationRefreshTime DHCPv6Opt = 32
+
+	// RFC 4280 Broadcast and Multicast Control Servers
+	DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33
+	DHCPv6OptBCMCSServerAddressList    DHCPv6Opt = 34
+
+	// RFC 4776 Civic Address ConfigurationOption
+	DHCPv6OptGeoconfCivic DHCPv6Opt = 36
+
+	// RFC 4649 Relay Agent Remote-ID
+	DHCPv6OptRemoteID DHCPv6Opt = 37
+
+	// RFC 4580 Relay Agent Subscriber-ID
+	DHCPv6OptSubscriberID DHCPv6Opt = 38
+
+	// RFC 4704 Client Fully Qualified Domain Name (FQDN)
+	DHCPv6OptClientFQDN DHCPv6Opt = 39
+
+	// RFC 5192 Protocol for Carrying Authentication for Network Access (PANA)
+	DHCPv6OptPanaAgent DHCPv6Opt = 40
+
+	// RFC 4833 Timezone Options
+	DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41
+	DHCPv6OptNewTZDBTimezone  DHCPv6Opt = 42
+
+	// RFC 4994 Relay Agent Echo Request
+	DHCPv6OptEchoRequestOption DHCPv6Opt = 43
+
+	// RFC 5007 Leasequery
+	DHCPv6OptLQQuery      DHCPv6Opt = 44
+	DHCPv6OptCLTTime      DHCPv6Opt = 45
+	DHCPv6OptClientData   DHCPv6Opt = 46
+	DHCPv6OptLQRelayData  DHCPv6Opt = 47
+	DHCPv6OptLQClientLink DHCPv6Opt = 48
+
+	// RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6)
+	DHCPv6OptMIP6HNIDF DHCPv6Opt = 49
+	DHCPv6OptMIP6VDINF DHCPv6Opt = 50
+	DHCPv6OptMIP6IDINF DHCPv6Opt = 69
+	DHCPv6OptMIP6UDINF DHCPv6Opt = 70
+	DHCPv6OptMIP6HNP   DHCPv6Opt = 71
+	DHCPv6OptMIP6HAA   DHCPv6Opt = 72
+	DHCPv6OptMIP6HAF   DHCPv6Opt = 73
+
+	// RFC 5223 Discovering Location-to-Service Translation (LoST) Servers
+	DHCPv6OptV6LOST DHCPv6Opt = 51
+
+	// RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP)
+	DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52
+
+	// RFC 5460 Bulk Leasequery
+	DHCPv6OptRelayID DHCPv6Opt = 53
+
+	// RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery
+	DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54
+	DHCPv6OptIPv6FQDNMoS    DHCPv6Opt = 55
+
+	// RFC 5908 NTP Server Option
+	DHCPv6OptNTPServer DHCPv6Opt = 56
+
+	// RFC 5986 Discovering the Local Location Information Server (LIS)
+	DHCPv6OptV6AccessDomain DHCPv6Opt = 57
+
+	// RFC 5986 SIP User Agent
+	DHCPv6OptSIPUACSList DHCPv6Opt = 58
+
+	// RFC 5970 Options for Network Boot
+	DHCPv6OptBootFileURL    DHCPv6Opt = 59
+	DHCPv6OptBootFileParam  DHCPv6Opt = 60
+	DHCPv6OptClientArchType DHCPv6Opt = 61
+	DHCPv6OptNII            DHCPv6Opt = 62
+
+	// RFC 6225 Coordinate-Based Location Configuration Information
+	DHCPv6OptGeolocation DHCPv6Opt = 63
+
+	// RFC 6334 Dual-Stack Lite
+	DHCPv6OptAFTRName DHCPv6Opt = 64
+
+	// RFC 6440 EAP Re-authentication Protocol (ERP)
+	DHCPv6OptERPLocalDomainName DHCPv6Opt = 65
+
+	// RFC 6422 Relay-Supplied DHCP Options
+	DHCPv6OptRSOO DHCPv6Opt = 66
+
+	// RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation
+	DHCPv6OptPDExclude DHCPv6Opt = 67
+
+	// RFC 6607 Virtual Subnet Selection
+	DHCPv6OptVSS DHCPv6Opt = 68
+
+	// RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes
+	DHCPv6OptRDNSSSelection DHCPv6Opt = 74
+
+	// RFC 6784 Kerberos Options for DHCPv6
+	DHCPv6OptKRBPrincipalName DHCPv6Opt = 75
+	DHCPv6OptKRBRealmName     DHCPv6Opt = 76
+	DHCPv6OptKRBKDC           DHCPv6Opt = 77
+
+	// RFC 6939 Client Link-Layer Address Option
+	DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79
+
+	// RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents
+	DHCPv6OptLinkAddress DHCPv6Opt = 80
+
+	// RFC 7037 RADIUS Option for the DHCPv6 Relay Agent
+	DHCPv6OptRADIUS DHCPv6Opt = 81
+
+	// RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT
+	DHCPv6OptSolMaxRt DHCPv6Opt = 82
+	DHCPv6OptInfMaxRt DHCPv6Opt = 83
+
+	// RFC 7078 Distributing Address Selection Policy
+	DHCPv6OptAddrSel      DHCPv6Opt = 84
+	DHCPv6OptAddrSelTable DHCPv6Opt = 85
+
+	// RFC 7291 DHCP Options for the Port Control Protocol (PCP)
+	DHCPv6OptV6PCPServer DHCPv6Opt = 86
+
+	// RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport
+	DHCPv6OptDHCPv4Message          DHCPv6Opt = 87
+	DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88
+
+	// RFC 7598 Configuration of Softwire Address and Port-Mapped Clients
+	DHCPv6OptS46Rule           DHCPv6Opt = 89
+	DHCPv6OptS46BR             DHCPv6Opt = 90
+	DHCPv6OptS46DMR            DHCPv6Opt = 91
+	DHCPv6OptS46V4V4Bind       DHCPv6Opt = 92
+	DHCPv6OptS46PortParameters DHCPv6Opt = 93
+	DHCPv6OptS46ContMAPE       DHCPv6Opt = 94
+	DHCPv6OptS46ContMAPT       DHCPv6Opt = 95
+	DHCPv6OptS46ContLW         DHCPv6Opt = 96
+
+	// RFC 7600 IPv4 Residual Deployment via IPv6
+	DHCPv6Opt4RD           DHCPv6Opt = 97
+	DHCPv6Opt4RDMapRule    DHCPv6Opt = 98
+	DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99
+
+	// RFC 7653 Active Leasequery
+	DHCPv6OptLQBaseTime  DHCPv6Opt = 100
+	DHCPv6OptLQStartTime DHCPv6Opt = 101
+	DHCPv6OptLQEndTime   DHCPv6Opt = 102
+
+	// RFC 7710 Captive-Portal Identification
+	DHCPv6OptCaptivePortal DHCPv6Opt = 103
+
+	// RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter Configuration
+	DHCPv6OptMPLParameters DHCPv6Opt = 104
+
+	// RFC 7839 Access-Network-Identifier (ANI)
+	DHCPv6OptANIATT           DHCPv6Opt = 105
+	DHCPv6OptANINetworkName   DHCPv6Opt = 106
+	DHCPv6OptANIAPName        DHCPv6Opt = 107
+	DHCPv6OptANIAPBSSID       DHCPv6Opt = 108
+	DHCPv6OptANIOperatorID    DHCPv6Opt = 109
+	DHCPv6OptANIOperatorRealm DHCPv6Opt = 110
+
+	// RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE)
+	DHCPv6OptS46Priority DHCPv6Opt = 111
+
+	// draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD)
+	DHCPv6OptMUDURLV6 DHCPv6Opt = 112
+
+	// RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes
+	DHCPv6OptV6Prefix64 DHCPv6Opt = 113
+
+	// RFC 8156 DHCPv6 Failover Protocol
+	DHCPv6OptFBindingStatus           DHCPv6Opt = 114
+	DHCPv6OptFConnectFlags            DHCPv6Opt = 115
+	DHCPv6OptFDNSRemovalInfo          DHCPv6Opt = 116
+	DHCPv6OptFDNSHostName             DHCPv6Opt = 117
+	DHCPv6OptFDNSZoneName             DHCPv6Opt = 118
+	DHCPv6OptFDNSFlags                DHCPv6Opt = 119
+	DHCPv6OptFExpirationTime          DHCPv6Opt = 120
+	DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121
+	DHCPv6OptFMCLT                    DHCPv6Opt = 122
+	DHCPv6OptFPartnerLifetime         DHCPv6Opt = 123
+	DHCPv6OptFPartnerLifetimeSent     DHCPv6Opt = 124
+	DHCPv6OptFPartnerDownTime         DHCPv6Opt = 125
+	DHCPv6OptFPartnerRawCltTime       DHCPv6Opt = 126
+	DHCPv6OptFProtocolVersion         DHCPv6Opt = 127
+	DHCPv6OptFKeepaliveTime           DHCPv6Opt = 128
+	DHCPv6OptFReconfigureData         DHCPv6Opt = 129
+	DHCPv6OptFRelationshipName        DHCPv6Opt = 130
+	DHCPv6OptFServerFlags             DHCPv6Opt = 131
+	DHCPv6OptFServerState             DHCPv6Opt = 132
+	DHCPv6OptFStartTimeOfState        DHCPv6Opt = 133
+	DHCPv6OptFStateExpirationTime     DHCPv6Opt = 134
+
+	// RFC 8357 Generalized UDP Source Port for DHCP Relay
+	DHCPv6OptRelayPort DHCPv6Opt = 135
+
+	// draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices
+	DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136
+
+	// RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery
+	DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143
+)
+
+// String returns a string version of a DHCPv6Opt.
+func (o DHCPv6Opt) String() string {
+	switch o {
+	case DHCPv6OptClientID:
+		return "ClientID"
+	case DHCPv6OptServerID:
+		return "ServerID"
+	case DHCPv6OptIANA:
+		return "IA_NA"
+	case DHCPv6OptIATA:
+		return "IA_TA"
+	case DHCPv6OptIAAddr:
+		return "IAAddr"
+	case DHCPv6OptOro:
+		return "Oro"
+	case DHCPv6OptPreference:
+		return "Preference"
+	case DHCPv6OptElapsedTime:
+		return "ElapsedTime"
+	case DHCPv6OptRelayMessage:
+		return "RelayMessage"
+	case DHCPv6OptAuth:
+		return "Auth"
+	case DHCPv6OptUnicast:
+		return "Unicast"
+	case DHCPv6OptStatusCode:
+		return "StatusCode"
+	case DHCPv6OptRapidCommit:
+		return "RapidCommit"
+	case DHCPv6OptUserClass:
+		return "UserClass"
+	case DHCPv6OptVendorClass:
+		return "VendorClass"
+	case DHCPv6OptVendorOpts:
+		return "VendorOpts"
+	case DHCPv6OptInterfaceID:
+		return "InterfaceID"
+	case DHCPv6OptReconfigureMessage:
+		return "ReconfigureMessage"
+	case DHCPv6OptReconfigureAccept:
+		return "ReconfigureAccept"
+	case DHCPv6OptSIPServersDomainList:
+		return "SIPServersDomainList"
+	case DHCPv6OptSIPServersAddressList:
+		return "SIPServersAddressList"
+	case DHCPv6OptDNSServers:
+		return "DNSRecursiveNameServer"
+	case DHCPv6OptDomainList:
+		return "DomainSearchList"
+	case DHCPv6OptIAPD:
+		return "IdentityAssociationPrefixDelegation"
+	case DHCPv6OptIAPrefix:
+		return "IAPDPrefix"
+	case DHCPv6OptNISServers:
+		return "NISServers"
+	case DHCPv6OptNISPServers:
+		return "NISv2Servers"
+	case DHCPv6OptNISDomainName:
+		return "NISDomainName"
+	case DHCPv6OptNISPDomainName:
+		return "NISv2DomainName"
+	case DHCPv6OptSNTPServers:
+		return "SNTPServers"
+	case DHCPv6OptInformationRefreshTime:
+		return "InformationRefreshTime"
+	case DHCPv6OptBCMCSServerDomainNameList:
+		return "BCMCSControlServersDomainNameList"
+	case DHCPv6OptBCMCSServerAddressList:
+		return "BCMCSControlServersAddressList"
+	case DHCPv6OptGeoconfCivic:
+		return "CivicAddress"
+	case DHCPv6OptRemoteID:
+		return "RelayAgentRemoteID"
+	case DHCPv6OptSubscriberID:
+		return "RelayAgentSubscriberID"
+	case DHCPv6OptClientFQDN:
+		return "ClientFQDN"
+	case DHCPv6OptPanaAgent:
+		return "PANAAuthenticationAgent"
+	case DHCPv6OptNewPOSIXTimezone:
+		return "NewPOSIXTimezone"
+	case DHCPv6OptNewTZDBTimezone:
+		return "NewTZDBTimezone"
+	case DHCPv6OptEchoRequestOption:
+		return "EchoRequest"
+	case DHCPv6OptLQQuery:
+		return "LeasequeryQuery"
+	case DHCPv6OptClientData:
+		return "LeasequeryClientData"
+	case DHCPv6OptCLTTime:
+		return "LeasequeryClientLastTransactionTime"
+	case DHCPv6OptLQRelayData:
+		return "LeasequeryRelayData"
+	case DHCPv6OptLQClientLink:
+		return "LeasequeryClientLink"
+	case DHCPv6OptMIP6HNIDF:
+		return "MIPv6HomeNetworkIDFQDN"
+	case DHCPv6OptMIP6VDINF:
+		return "MIPv6VisitedHomeNetworkInformation"
+	case DHCPv6OptMIP6IDINF:
+		return "MIPv6IdentifiedHomeNetworkInformation"
+	case DHCPv6OptMIP6UDINF:
+		return "MIPv6UnrestrictedHomeNetworkInformation"
+	case DHCPv6OptMIP6HNP:
+		return "MIPv6HomeNetworkPrefix"
+	case DHCPv6OptMIP6HAA:
+		return "MIPv6HomeAgentAddress"
+	case DHCPv6OptMIP6HAF:
+		return "MIPv6HomeAgentFQDN"
+	case DHCPv6OptV6LOST:
+		return "LoST Server"
+	case DHCPv6OptCAPWAPACV6:
+		return "CAPWAPAccessControllerV6"
+	case DHCPv6OptRelayID:
+		return "LeasequeryRelayID"
+	case DHCPv6OptIPv6AddressMoS:
+		return "MoSIPv6Address"
+	case DHCPv6OptIPv6FQDNMoS:
+		return "MoSDomainNameList"
+	case DHCPv6OptNTPServer:
+		return "NTPServer"
+	case DHCPv6OptV6AccessDomain:
+		return "AccessNetworkDomainName"
+	case DHCPv6OptSIPUACSList:
+		return "SIPUserAgentConfigurationServiceDomains"
+	case DHCPv6OptBootFileURL:
+		return "BootFileURL"
+	case DHCPv6OptBootFileParam:
+		return "BootFileParameters"
+	case DHCPv6OptClientArchType:
+		return "ClientSystemArchitectureType"
+	case DHCPv6OptNII:
+		return "ClientNetworkInterfaceIdentifier"
+	case DHCPv6OptGeolocation:
+		return "Geolocation"
+	case DHCPv6OptAFTRName:
+		return "AFTRName"
+	case DHCPv6OptERPLocalDomainName:
+		return "ERPLocalDomainName"
+	case DHCPv6OptRSOO:
+		return "RSOOption"
+	case DHCPv6OptPDExclude:
+		return "PrefixExclude"
+	case DHCPv6OptVSS:
+		return "VirtualSubnetSelection"
+	case DHCPv6OptRDNSSSelection:
+		return "RDNSSSelection"
+	case DHCPv6OptKRBPrincipalName:
+		return "KerberosPrincipalName"
+	case DHCPv6OptKRBRealmName:
+		return "KerberosRealmName"
+	case DHCPv6OptKRBKDC:
+		return "KerberosKDC"
+	case DHCPv6OptClientLinkLayerAddress:
+		return "ClientLinkLayerAddress"
+	case DHCPv6OptLinkAddress:
+		return "LinkAddress"
+	case DHCPv6OptRADIUS:
+		return "RADIUS"
+	case DHCPv6OptSolMaxRt:
+		return "SolMaxRt"
+	case DHCPv6OptInfMaxRt:
+		return "InfMaxRt"
+	case DHCPv6OptAddrSel:
+		return "AddressSelection"
+	case DHCPv6OptAddrSelTable:
+		return "AddressSelectionTable"
+	case DHCPv6OptV6PCPServer:
+		return "PCPServer"
+	case DHCPv6OptDHCPv4Message:
+		return "DHCPv4Message"
+	case DHCPv6OptDHCPv4OverDHCPv6Server:
+		return "DHCP4o6ServerAddress"
+	case DHCPv6OptS46Rule:
+		return "S46Rule"
+	case DHCPv6OptS46BR:
+		return "S46BR"
+	case DHCPv6OptS46DMR:
+		return "S46DMR"
+	case DHCPv6OptS46V4V4Bind:
+		return "S46IPv4IPv6AddressBinding"
+	case DHCPv6OptS46PortParameters:
+		return "S46PortParameters"
+	case DHCPv6OptS46ContMAPE:
+		return "S46MAPEContainer"
+	case DHCPv6OptS46ContMAPT:
+		return "S46MAPTContainer"
+	case DHCPv6OptS46ContLW:
+		return "S46Lightweight4Over6Container"
+	case DHCPv6Opt4RD:
+		return "4RD"
+	case DHCPv6Opt4RDMapRule:
+		return "4RDMapRule"
+	case DHCPv6Opt4RDNonMapRule:
+		return "4RDNonMapRule"
+	case DHCPv6OptLQBaseTime:
+		return "LQBaseTime"
+	case DHCPv6OptLQStartTime:
+		return "LQStartTime"
+	case DHCPv6OptLQEndTime:
+		return "LQEndTime"
+	case DHCPv6OptCaptivePortal:
+		return "CaptivePortal"
+	case DHCPv6OptMPLParameters:
+		return "MPLParameterConfiguration"
+	case DHCPv6OptANIATT:
+		return "ANIAccessTechnologyType"
+	case DHCPv6OptANINetworkName:
+		return "ANINetworkName"
+	case DHCPv6OptANIAPName:
+		return "ANIAccessPointName"
+	case DHCPv6OptANIAPBSSID:
+		return "ANIAccessPointBSSID"
+	case DHCPv6OptANIOperatorID:
+		return "ANIOperatorIdentifier"
+	case DHCPv6OptANIOperatorRealm:
+		return "ANIOperatorRealm"
+	case DHCPv6OptS46Priority:
+		return "S46Priority"
+	case DHCPv6OptMUDURLV6:
+		return "ManufacturerUsageDescriptionURL"
+	case DHCPv6OptV6Prefix64:
+		return "V6Prefix64"
+	case DHCPv6OptFBindingStatus:
+		return "FailoverBindingStatus"
+	case DHCPv6OptFConnectFlags:
+		return "FailoverConnectFlags"
+	case DHCPv6OptFDNSRemovalInfo:
+		return "FailoverDNSRemovalInfo"
+	case DHCPv6OptFDNSHostName:
+		return "FailoverDNSHostName"
+	case DHCPv6OptFDNSZoneName:
+		return "FailoverDNSZoneName"
+	case DHCPv6OptFDNSFlags:
+		return "FailoverDNSFlags"
+	case DHCPv6OptFExpirationTime:
+		return "FailoverExpirationTime"
+	case DHCPv6OptFMaxUnacknowledgedBNDUPD:
+		return "FailoverMaxUnacknowledgedBNDUPDMessages"
+	case DHCPv6OptFMCLT:
+		return "FailoverMaximumClientLeadTime"
+	case DHCPv6OptFPartnerLifetime:
+		return "FailoverPartnerLifetime"
+	case DHCPv6OptFPartnerLifetimeSent:
+		return "FailoverPartnerLifetimeSent"
+	case DHCPv6OptFPartnerDownTime:
+		return "FailoverPartnerDownTime"
+	case DHCPv6OptFPartnerRawCltTime:
+		return "FailoverPartnerRawClientLeadTime"
+	case DHCPv6OptFProtocolVersion:
+		return "FailoverProtocolVersion"
+	case DHCPv6OptFKeepaliveTime:
+		return "FailoverKeepaliveTime"
+	case DHCPv6OptFReconfigureData:
+		return "FailoverReconfigureData"
+	case DHCPv6OptFRelationshipName:
+		return "FailoverRelationshipName"
+	case DHCPv6OptFServerFlags:
+		return "FailoverServerFlags"
+	case DHCPv6OptFServerState:
+		return "FailoverServerState"
+	case DHCPv6OptFStartTimeOfState:
+		return "FailoverStartTimeOfState"
+	case DHCPv6OptFStateExpirationTime:
+		return "FailoverStateExpirationTime"
+	case DHCPv6OptRelayPort:
+		return "RelayPort"
+	case DHCPv6OptV6ZeroTouchRedirect:
+		return "ZeroTouch"
+	case DHCPv6OptIPV6AddressANDSF:
+		return "ANDSFIPv6Address"
+	default:
+		return fmt.Sprintf("Unknown(%d)", uint16(o))
+	}
+}
+
+// DHCPv6Options is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPv6Options []DHCPv6Option
+
+// String returns a string version of the options list.
+func (o DHCPv6Options) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteByte('[')
+	for i, opt := range o {
+		buf.WriteString(opt.String())
+		if i+1 != len(o) {
+			buf.WriteString(", ")
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// DHCPv6Option represents a DHCPv6 option.
+type DHCPv6Option struct {
+	Code   DHCPv6Opt
+	Length uint16
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPv6Option) String() string {
+	switch o.Code {
+	case DHCPv6OptClientID, DHCPv6OptServerID:
+		duid, err := decodeDHCPv6DUID(o.Data)
+		if err != nil {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Code)
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String())
+	case DHCPv6OptOro:
+		options := ""
+		for i := 0; i < int(o.Length); i += 2 {
+			if options != "" {
+				options += ","
+			}
+			option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2]))
+			options += option.String()
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, options)
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data)
+	}
+}
+
+// NewDHCPv6Option constructs a new DHCPv6Option with a given code and data.
+func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option {
+	o := DHCPv6Option{Code: code}
+	if data != nil {
+		o.Data = data
+		o.Length = uint16(len(data))
+	}
+
+	return o
+}
+
+func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error {
+	binary.BigEndian.PutUint16(b[0:2], uint16(o.Code))
+	if opts.FixLengths {
+		binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data)))
+	} else {
+		binary.BigEndian.PutUint16(b[2:4], o.Length)
+	}
+	copy(b[4:], o.Data)
+
+	return nil
+}
+
+func (o *DHCPv6Option) decode(data []byte) error {
+	if len(data) < 2 {
+		return errors.New("not enough data to decode")
+	}
+	o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2]))
+	if len(data) < 4 {
+		return errors.New("not enough data to decode")
+	}
+	o.Length = binary.BigEndian.Uint16(data[2:4])
+	if len(data) < 4+int(o.Length) {
+		return errors.New("not enough data to decode")
+	}
+	o.Data = data[4 : 4+o.Length]
+	return nil
+}
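+
+// exampleDHCPv6OptionEncode is an illustrative sketch, not part of the
+// upstream gopacket sources: it builds an Option Request (ORO) option asking
+// for DNS servers and encodes it into the 2-byte code / 2-byte length / data
+// TLV layout used above. FixLengths makes encode derive the length from the
+// data; the function name is hypothetical.
+func exampleDHCPv6OptionEncode() ([]byte, error) {
+	requested := make([]byte, 2)
+	binary.BigEndian.PutUint16(requested, uint16(DHCPv6OptDNSServers))
+	o := NewDHCPv6Option(DHCPv6OptOro, requested)
+
+	buf := make([]byte, 4+len(o.Data))
+	if err := o.encode(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}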
diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go
new file mode 100644
index 0000000..a0b2d72
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dns.go
@@ -0,0 +1,1053 @@
+// Copyright 2014, 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// DNSClass defines the class associated with a request/response.  Different DNS
+// classes can be thought of as an array of parallel namespace trees.
+type DNSClass uint16
+
+// DNSClass known values.
+const (
+	DNSClassIN  DNSClass = 1   // Internet
+	DNSClassCS  DNSClass = 2   // the CSNET class (Obsolete)
+	DNSClassCH  DNSClass = 3   // the CHAOS class
+	DNSClassHS  DNSClass = 4   // Hesiod [Dyer 87]
+	DNSClassAny DNSClass = 255 // AnyClass
+)
+
+func (dc DNSClass) String() string {
+	switch dc {
+	default:
+		return "Unknown"
+	case DNSClassIN:
+		return "IN"
+	case DNSClassCS:
+		return "CS"
+	case DNSClassCH:
+		return "CH"
+	case DNSClassHS:
+		return "HS"
+	case DNSClassAny:
+		return "Any"
+	}
+}
+
+// DNSType defines the type of data being requested/returned in a
+// question/answer.
+type DNSType uint16
+
+// DNSType known values.
+const (
+	DNSTypeA     DNSType = 1  // a host address
+	DNSTypeNS    DNSType = 2  // an authoritative name server
+	DNSTypeMD    DNSType = 3  // a mail destination (Obsolete - use MX)
+	DNSTypeMF    DNSType = 4  // a mail forwarder (Obsolete - use MX)
+	DNSTypeCNAME DNSType = 5  // the canonical name for an alias
+	DNSTypeSOA   DNSType = 6  // marks the start of a zone of authority
+	DNSTypeMB    DNSType = 7  // a mailbox domain name (EXPERIMENTAL)
+	DNSTypeMG    DNSType = 8  // a mail group member (EXPERIMENTAL)
+	DNSTypeMR    DNSType = 9  // a mail rename domain name (EXPERIMENTAL)
+	DNSTypeNULL  DNSType = 10 // a null RR (EXPERIMENTAL)
+	DNSTypeWKS   DNSType = 11 // a well known service description
+	DNSTypePTR   DNSType = 12 // a domain name pointer
+	DNSTypeHINFO DNSType = 13 // host information
+	DNSTypeMINFO DNSType = 14 // mailbox or mail list information
+	DNSTypeMX    DNSType = 15 // mail exchange
+	DNSTypeTXT   DNSType = 16 // text strings
+	DNSTypeAAAA  DNSType = 28 // an IPv6 host address [RFC3596]
+	DNSTypeSRV   DNSType = 33 // server discovery [RFC2782] [RFC6195]
+	DNSTypeOPT   DNSType = 41 // OPT Pseudo-RR [RFC6891]
+)
+
+func (dt DNSType) String() string {
+	switch dt {
+	default:
+		return "Unknown"
+	case DNSTypeA:
+		return "A"
+	case DNSTypeNS:
+		return "NS"
+	case DNSTypeMD:
+		return "MD"
+	case DNSTypeMF:
+		return "MF"
+	case DNSTypeCNAME:
+		return "CNAME"
+	case DNSTypeSOA:
+		return "SOA"
+	case DNSTypeMB:
+		return "MB"
+	case DNSTypeMG:
+		return "MG"
+	case DNSTypeMR:
+		return "MR"
+	case DNSTypeNULL:
+		return "NULL"
+	case DNSTypeWKS:
+		return "WKS"
+	case DNSTypePTR:
+		return "PTR"
+	case DNSTypeHINFO:
+		return "HINFO"
+	case DNSTypeMINFO:
+		return "MINFO"
+	case DNSTypeMX:
+		return "MX"
+	case DNSTypeTXT:
+		return "TXT"
+	case DNSTypeAAAA:
+		return "AAAA"
+	case DNSTypeSRV:
+		return "SRV"
+	case DNSTypeOPT:
+		return "OPT"
+	}
+}
+
+// DNSResponseCode provides response codes for question answers.
+type DNSResponseCode uint8
+
+// DNSResponseCode known values.
+const (
+	DNSResponseCodeNoErr    DNSResponseCode = 0  // No error
+	DNSResponseCodeFormErr  DNSResponseCode = 1  // Format Error                       [RFC1035]
+	DNSResponseCodeServFail DNSResponseCode = 2  // Server Failure                     [RFC1035]
+	DNSResponseCodeNXDomain DNSResponseCode = 3  // Non-Existent Domain                [RFC1035]
+	DNSResponseCodeNotImp   DNSResponseCode = 4  // Not Implemented                    [RFC1035]
+	DNSResponseCodeRefused  DNSResponseCode = 5  // Query Refused                      [RFC1035]
+	DNSResponseCodeYXDomain DNSResponseCode = 6  // Name Exists when it should not     [RFC2136]
+	DNSResponseCodeYXRRSet  DNSResponseCode = 7  // RR Set Exists when it should not   [RFC2136]
+	DNSResponseCodeNXRRSet  DNSResponseCode = 8  // RR Set that should exist does not  [RFC2136]
+	DNSResponseCodeNotAuth  DNSResponseCode = 9  // Server Not Authoritative for zone  [RFC2136]
+	DNSResponseCodeNotZone  DNSResponseCode = 10 // Name not contained in zone         [RFC2136]
+	DNSResponseCodeBadVers  DNSResponseCode = 16 // Bad OPT Version                    [RFC2671]
+	DNSResponseCodeBadSig   DNSResponseCode = 16 // TSIG Signature Failure             [RFC2845]
+	DNSResponseCodeBadKey   DNSResponseCode = 17 // Key not recognized                 [RFC2845]
+	DNSResponseCodeBadTime  DNSResponseCode = 18 // Signature out of time window       [RFC2845]
+	DNSResponseCodeBadMode  DNSResponseCode = 19 // Bad TKEY Mode                      [RFC2930]
+	DNSResponseCodeBadName  DNSResponseCode = 20 // Duplicate key name                 [RFC2930]
+	DNSResponseCodeBadAlg   DNSResponseCode = 21 // Algorithm not supported            [RFC2930]
+	DNSResponseCodeBadTruc  DNSResponseCode = 22 // Bad Truncation                     [RFC4635]
+)
+
+func (drc DNSResponseCode) String() string {
+	switch drc {
+	default:
+		return "Unknown"
+	case DNSResponseCodeNoErr:
+		return "No Error"
+	case DNSResponseCodeFormErr:
+		return "Format Error"
+	case DNSResponseCodeServFail:
+		return "Server Failure"
+	case DNSResponseCodeNXDomain:
+		return "Non-Existent Domain"
+	case DNSResponseCodeNotImp:
+		return "Not Implemented"
+	case DNSResponseCodeRefused:
+		return "Query Refused"
+	case DNSResponseCodeYXDomain:
+		return "Name Exists when it should not"
+	case DNSResponseCodeYXRRSet:
+		return "RR Set Exists when it should not"
+	case DNSResponseCodeNXRRSet:
+		return "RR Set that should exist does not"
+	case DNSResponseCodeNotAuth:
+		return "Server Not Authoritative for zone"
+	case DNSResponseCodeNotZone:
+		return "Name not contained in zone"
+	case DNSResponseCodeBadVers:
+		return "Bad OPT Version"
+	case DNSResponseCodeBadKey:
+		return "Key not recognized"
+	case DNSResponseCodeBadTime:
+		return "Signature out of time window"
+	case DNSResponseCodeBadMode:
+		return "Bad TKEY Mode"
+	case DNSResponseCodeBadName:
+		return "Duplicate key name"
+	case DNSResponseCodeBadAlg:
+		return "Algorithm not supported"
+	case DNSResponseCodeBadTruc:
+		return "Bad Truncation"
+	}
+}
+
+// DNSOpCode defines a set of different operation types.
+type DNSOpCode uint8
+
+// DNSOpCode known values.
+const (
+	DNSOpCodeQuery  DNSOpCode = 0 // Query                  [RFC1035]
+	DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425]
+	DNSOpCodeStatus DNSOpCode = 2 // Status                 [RFC1035]
+	DNSOpCodeNotify DNSOpCode = 4 // Notify                 [RFC1996]
+	DNSOpCodeUpdate DNSOpCode = 5 // Update                 [RFC2136]
+)
+
+func (doc DNSOpCode) String() string {
+	switch doc {
+	default:
+		return "Unknown"
+	case DNSOpCodeQuery:
+		return "Query"
+	case DNSOpCodeIQuery:
+		return "Inverse Query"
+	case DNSOpCodeStatus:
+		return "Status"
+	case DNSOpCodeNotify:
+		return "Notify"
+	case DNSOpCodeUpdate:
+		return "Update"
+	}
+}
+
+// DNS is specified in RFC 1034 / RFC 1035
+// +---------------------+
+// |        Header       |
+// +---------------------+
+// |       Question      | the question for the name server
+// +---------------------+
+// |        Answer       | RRs answering the question
+// +---------------------+
+// |      Authority      | RRs pointing toward an authority
+// +---------------------+
+// |      Additional     | RRs holding additional information
+// +---------------------+
+//
+//  DNS Header
+//  0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      ID                       |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |QR|   Opcode  |AA|TC|RD|RA|   Z    |   RCODE   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    QDCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    ANCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    NSCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    ARCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// DNS contains data from a single Domain Name Service packet.
+type DNS struct {
+	BaseLayer
+
+	// Header fields
+	ID     uint16
+	QR     bool
+	OpCode DNSOpCode
+
+	AA bool  // Authoritative answer
+	TC bool  // Truncated
+	RD bool  // Recursion desired
+	RA bool  // Recursion available
+	Z  uint8 // Reserved for future use
+
+	ResponseCode DNSResponseCode
+	QDCount      uint16 // Number of questions to expect
+	ANCount      uint16 // Number of answers to expect
+	NSCount      uint16 // Number of authorities to expect
+	ARCount      uint16 // Number of additional records to expect
+
+	// Entries
+	Questions   []DNSQuestion
+	Answers     []DNSResourceRecord
+	Authorities []DNSResourceRecord
+	Additionals []DNSResourceRecord
+
+	// buffer for doing name decoding.  We use a single reusable buffer to avoid
+	// name decoding on a single object via multiple DecodeFromBytes calls
+	// requiring constant allocation of small byte slices.
+	buffer []byte
+}
+
+// LayerType returns gopacket.LayerTypeDNS.
+func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS }
+
+// decodeDNS decodes the byte slice into a DNS type. It also
+// sets up the application layer in the PacketBuilder.
+func decodeDNS(data []byte, p gopacket.PacketBuilder) error {
+	d := &DNS{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+	return nil
+}
+
+// DecodeFromBytes decodes the slice into the DNS struct.
+func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	d.buffer = d.buffer[:0]
+
+	if len(data) < 12 {
+		df.SetTruncated()
+		return errDNSPacketTooShort
+	}
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+	d.ID = binary.BigEndian.Uint16(data[:2])
+	d.QR = data[2]&0x80 != 0
+	d.OpCode = DNSOpCode(data[2]>>3) & 0x0F
+	d.AA = data[2]&0x04 != 0
+	d.TC = data[2]&0x02 != 0
+	d.RD = data[2]&0x01 != 0
+	d.RA = data[3]&0x80 != 0
+	d.Z = uint8(data[3]>>4) & 0x7
+	d.ResponseCode = DNSResponseCode(data[3] & 0xF)
+	d.QDCount = binary.BigEndian.Uint16(data[4:6])
+	d.ANCount = binary.BigEndian.Uint16(data[6:8])
+	d.NSCount = binary.BigEndian.Uint16(data[8:10])
+	d.ARCount = binary.BigEndian.Uint16(data[10:12])
+
+	d.Questions = d.Questions[:0]
+	d.Answers = d.Answers[:0]
+	d.Authorities = d.Authorities[:0]
+	d.Additionals = d.Additionals[:0]
+
+	offset := 12
+	var err error
+	for i := 0; i < int(d.QDCount); i++ {
+		var q DNSQuestion
+		if offset, err = q.decode(data, offset, df, &d.buffer); err != nil {
+			return err
+		}
+		d.Questions = append(d.Questions, q)
+	}
+
+	// For some horrible reason, if we do the obvious thing in this loop:
+	//   var r DNSResourceRecord
+	//   if blah := r.decode(blah); err != nil {
+	//     return err
+	//   }
+	//   d.Foo = append(d.Foo, r)
+	// the Go compiler thinks that 'r' escapes to the heap, causing a malloc for
+	// every Answer, Authority, and Additional.  To get around this, we do
+	// something really silly:  we append an empty resource record to our slice,
+	// then use the last value in the slice to call decode.  Since the value is
+	// already in the slice, there's no WAY it can escape... on the other hand our
+	// code is MUCH uglier :(
+	for i := 0; i < int(d.ANCount); i++ {
+		d.Answers = append(d.Answers, DNSResourceRecord{})
+		if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Answers = d.Answers[:i] // strip off erroneous value
+			return err
+		}
+	}
+	for i := 0; i < int(d.NSCount); i++ {
+		d.Authorities = append(d.Authorities, DNSResourceRecord{})
+		if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Authorities = d.Authorities[:i] // strip off erroneous value
+			return err
+		}
+	}
+	for i := 0; i < int(d.ARCount); i++ {
+		d.Additionals = append(d.Additionals, DNSResourceRecord{})
+		if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Additionals = d.Additionals[:i] // strip off erroneous value
+			return err
+		}
+	}
+
+	if uint16(len(d.Questions)) != d.QDCount {
+		return errDecodeQueryBadQDCount
+	} else if uint16(len(d.Answers)) != d.ANCount {
+		return errDecodeQueryBadANCount
+	} else if uint16(len(d.Authorities)) != d.NSCount {
+		return errDecodeQueryBadNSCount
+	} else if uint16(len(d.Additionals)) != d.ARCount {
+		return errDecodeQueryBadARCount
+	}
+	return nil
+}
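+
+// exampleDecodeDNSQuery is an illustrative sketch, not part of the upstream
+// gopacket sources: it decodes a hand-built query for "example.com" (type A,
+// class IN), exercising the header and question parsing above. The bytes and
+// the function name are hypothetical.
+func exampleDecodeDNSQuery() (*DNS, error) {
+	raw := []byte{
+		0x12, 0x34, // ID
+		0x01, 0x00, // QR=0 (query), RD=1
+		0x00, 0x01, // QDCOUNT = 1
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // AN/NS/ARCOUNT = 0
+		7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 3, 'c', 'o', 'm', 0, // QNAME
+		0x00, 0x01, // QTYPE = A
+		0x00, 0x01, // QCLASS = IN
+	}
+	d := &DNS{}
+	if err := d.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+		return nil, err
+	}
+	return d, nil
+}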
+
+// CanDecode implements gopacket.DecodingLayer.
+func (d *DNS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDNS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (d *DNS) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// Payload returns nil.
+func (d *DNS) Payload() []byte {
+	return nil
+}
+
+func b2i(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func recSize(rr *DNSResourceRecord) int {
+	switch rr.Type {
+	case DNSTypeA:
+		return 4
+	case DNSTypeAAAA:
+		return 16
+	case DNSTypeNS:
+		return len(rr.NS) + 2
+	case DNSTypeCNAME:
+		return len(rr.CNAME) + 2
+	case DNSTypePTR:
+		return len(rr.PTR) + 2
+	case DNSTypeSOA:
+		return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20
+	case DNSTypeMX:
+		return 2 + len(rr.MX.Name) + 2
+	case DNSTypeTXT:
+		l := len(rr.TXTs)
+		for _, txt := range rr.TXTs {
+			l += len(txt)
+		}
+		return l
+	case DNSTypeSRV:
+		return 6 + len(rr.SRV.Name) + 2
+	case DNSTypeOPT:
+		l := len(rr.OPT) * 4
+		for _, opt := range rr.OPT {
+			l += len(opt.Data)
+		}
+		return l
+	}
+
+	return 0
+}
+
+func computeSize(recs []DNSResourceRecord) int {
+	sz := 0
+	for _, rr := range recs {
+		v := len(rr.Name)
+
+		if v == 0 {
+			sz += v + 11
+		} else {
+			sz += v + 12
+		}
+
+		sz += recSize(&rr)
+	}
+	return sz
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	dsz := 0
+	for _, q := range d.Questions {
+		dsz += len(q.Name) + 6
+	}
+	dsz += computeSize(d.Answers)
+	dsz += computeSize(d.Authorities)
+	dsz += computeSize(d.Additionals)
+
+	bytes, err := b.PrependBytes(12 + dsz)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, d.ID)
+	bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD))
+	bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode))
+
+	if opts.FixLengths {
+		d.QDCount = uint16(len(d.Questions))
+		d.ANCount = uint16(len(d.Answers))
+		d.NSCount = uint16(len(d.Authorities))
+		d.ARCount = uint16(len(d.Additionals))
+	}
+	binary.BigEndian.PutUint16(bytes[4:], d.QDCount)
+	binary.BigEndian.PutUint16(bytes[6:], d.ANCount)
+	binary.BigEndian.PutUint16(bytes[8:], d.NSCount)
+	binary.BigEndian.PutUint16(bytes[10:], d.ARCount)
+
+	off := 12
+	for _, qd := range d.Questions {
+		n := qd.encode(bytes, off)
+		off += n
+	}
+
+	for i := range d.Answers {
+		// done this way so we can modify DNSResourceRecord to fix
+		// lengths if requested
+		qa := &d.Answers[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+
+	for i := range d.Authorities {
+		qa := &d.Authorities[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+	for i := range d.Additionals {
+		qa := &d.Additionals[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+
+	return nil
+}
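+
+// exampleSerializeDNSAnswer is an illustrative sketch, not part of the
+// upstream gopacket sources: it serializes a response carrying a single A
+// record with gopacket.NewSerializeBuffer. FixLengths fills in the
+// QD/AN/NS/AR counts and the record's DataLength; the function name and the
+// sample data are hypothetical.
+func exampleSerializeDNSAnswer() ([]byte, error) {
+	d := &DNS{
+		ID: 0x1234,
+		QR: true,
+		Answers: []DNSResourceRecord{{
+			Name:  []byte("example.com"),
+			Type:  DNSTypeA,
+			Class: DNSClassIN,
+			TTL:   300,
+			IP:    net.IPv4(192, 0, 2, 1),
+		}},
+	}
+	buf := gopacket.NewSerializeBuffer()
+	if err := d.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}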
+
+const maxRecursionLevel = 255
+
+func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) {
+	if level > maxRecursionLevel {
+		return nil, 0, errMaxRecursion
+	} else if offset >= len(data) {
+		return nil, 0, errDNSNameOffsetTooHigh
+	} else if offset < 0 {
+		return nil, 0, errDNSNameOffsetNegative
+	}
+	start := len(*buffer)
+	index := offset
+	if data[index] == 0x00 {
+		return nil, index + 1, nil
+	}
+loop:
+	for data[index] != 0x00 {
+		switch data[index] & 0xc0 {
+		default:
+			/* RFC 1035
+			   A domain name represented as a sequence of labels, where
+			   each label consists of a length octet followed by that
+			   number of octets.  The domain name terminates with the
+			   zero length octet for the null label of the root.  Note
+			   that this field may be an odd number of octets; no
+			   padding is used.
+			*/
+			index2 := index + int(data[index]) + 1
+			if index2-offset > 255 {
+				return nil, 0, errDNSNameTooLong
+			} else if index2 < index+1 || index2 > len(data) {
+				return nil, 0, errDNSNameInvalidIndex
+			}
+			*buffer = append(*buffer, '.')
+			*buffer = append(*buffer, data[index+1:index2]...)
+			index = index2
+
+		case 0xc0:
+			/* RFC 1035
+			   The pointer takes the form of a two octet sequence.
+
+			   The first two bits are ones.  This allows a pointer to
+			   be distinguished from a label, since the label must
+			   begin with two zero bits because labels are restricted
+			   to 63 octets or less.  (The 10 and 01 combinations are
+			   reserved for future use.)  The OFFSET field specifies
+			   an offset from the start of the message (i.e., the
+			   first octet of the ID field in the domain header).  A
+			   zero offset specifies the first byte of the ID field,
+			   etc.
+
+			   The compression scheme allows a domain name in a message to be
+			   represented as either:
+			      - a sequence of labels ending in a zero octet
+			      - a pointer
+			      - a sequence of labels ending with a pointer
+			*/
+			if index+2 > len(data) {
+				return nil, 0, errDNSPointerOffsetTooHigh
+			}
+			offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff)
+			if offsetp > len(data) {
+				return nil, 0, errDNSPointerOffsetTooHigh
+			}
+			// This looks a little tricky, but actually isn't.  Because of how
+			// decodeName is written, calling it appends the decoded name to the
+			// current buffer.  We already have the start of the buffer, then, so
+			// once this call is done buffer[start:] will contain our full name.
+			_, _, err := decodeName(data, offsetp, buffer, level+1)
+			if err != nil {
+				return nil, 0, err
+			}
+			index++ // pointer is two bytes, so add an extra byte here.
+			break loop
+		/* EDNS, or other DNS option ? */
+		case 0x40: // RFC 2673
+			return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)",
+				data[index], index)
+
+		case 0x80:
+			return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)",
+				data[index], index)
+		}
+		if index >= len(data) {
+			return nil, 0, errDNSIndexOutOfRange
+		}
+	}
+	if len(*buffer) <= start {
+		return (*buffer)[start:], index + 1, nil
+	}
+	return (*buffer)[start+1:], index + 1, nil
+}
+
+// DNSQuestion wraps a single request (question) within a DNS query.
+type DNSQuestion struct {
+	Name  []byte
+	Type  DNSType
+	Class DNSClass
+}
+
+func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+	name, endq, err := decodeName(data, offset, buffer, 1)
+	if err != nil {
+		return 0, err
+	}
+
+	q.Name = name
+	q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+
+	return endq + 4, nil
+}
+
+func (q *DNSQuestion) encode(data []byte, offset int) int {
+	noff := encodeName(q.Name, data, offset)
+	nSz := noff - offset
+	binary.BigEndian.PutUint16(data[noff:], uint16(q.Type))
+	binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class))
+	return nSz + 4
+}
+
+//  DNSResourceRecord
+//  0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                                               |
+//  /                                               /
+//  /                      NAME                     /
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TYPE                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     CLASS                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TTL                      |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   RDLENGTH                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+//  /                     RDATA                     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// DNSResourceRecord wraps the data from a single DNS resource within a
+// response.
+type DNSResourceRecord struct {
+	// Header
+	Name  []byte
+	Type  DNSType
+	Class DNSClass
+	TTL   uint32
+
+	// RDATA Raw Values
+	DataLength uint16
+	Data       []byte
+
+	// RDATA Decoded Values
+	IP             net.IP
+	NS, CNAME, PTR []byte
+	TXTs           [][]byte
+	SOA            DNSSOA
+	SRV            DNSSRV
+	MX             DNSMX
+	OPT            []DNSOPT // See RFC 6891, section 6.1.2
+
+	// Undecoded TXT for backward compatibility
+	TXT []byte
+}
+
+// decode decodes the resource record, returning the offset just past the end of the record.
+func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+	name, endq, err := decodeName(data, offset, buffer, 1)
+	if err != nil {
+		return 0, err
+	}
+
+	rr.Name = name
+	rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+	rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+	rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10])
+	end := endq + 10 + int(rr.DataLength)
+	if end > len(data) {
+		return 0, errDecodeRecordLength
+	}
+	rr.Data = data[endq+10 : end]
+
+	if err = rr.decodeRData(data, endq+10, buffer); err != nil {
+		return 0, err
+	}
+
+	return endq + 10 + int(rr.DataLength), nil
+}
+
+func encodeName(name []byte, data []byte, offset int) int {
+	l := 0
+	for i := range name {
+		if name[i] == '.' {
+			data[offset+i-l] = byte(l)
+			l = 0
+		} else {
+			// skip one to write the length
+			data[offset+i+1] = name[i]
+			l++
+		}
+	}
+
+	if len(name) == 0 {
+		data[offset] = 0x00 // terminal
+		return offset + 1
+	}
+
+	// length for final portion
+	data[offset+len(name)-l] = byte(l)
+	data[offset+len(name)+1] = 0x00 // terminal
+	return offset + len(name) + 2
+}
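+
+// exampleEncodeName is an illustrative sketch, not part of the upstream
+// gopacket sources: it shows how encodeName turns "example.com" into the
+// length-prefixed label form (7"example" 3"com" 0x00). The function name is
+// hypothetical.
+func exampleEncodeName() []byte {
+	name := []byte("example.com")
+	buf := make([]byte, len(name)+2) // labels plus the terminating zero octet
+	encodeName(name, buf, 0)
+	return buf
+}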
+
+func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) {
+
+	noff := encodeName(rr.Name, data, offset)
+	nSz := noff - offset
+
+	binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type))
+	binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class))
+	binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL))
+
+	switch rr.Type {
+	case DNSTypeA:
+		copy(data[noff+10:], rr.IP.To4())
+	case DNSTypeAAAA:
+		copy(data[noff+10:], rr.IP)
+	case DNSTypeNS:
+		encodeName(rr.NS, data, noff+10)
+	case DNSTypeCNAME:
+		encodeName(rr.CNAME, data, noff+10)
+	case DNSTypePTR:
+		encodeName(rr.PTR, data, noff+10)
+	case DNSTypeSOA:
+		noff2 := encodeName(rr.SOA.MName, data, noff+10)
+		noff2 = encodeName(rr.SOA.RName, data, noff2)
+		binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial)
+		binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh)
+		binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry)
+		binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire)
+		binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum)
+	case DNSTypeMX:
+		binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference)
+		encodeName(rr.MX.Name, data, noff+12)
+	case DNSTypeTXT:
+		noff2 := noff + 10
+		for _, txt := range rr.TXTs {
+			data[noff2] = byte(len(txt))
+			copy(data[noff2+1:], txt)
+			noff2 += 1 + len(txt)
+		}
+	case DNSTypeSRV:
+		binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority)
+		binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight)
+		binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port)
+		encodeName(rr.SRV.Name, data, noff+16)
+	case DNSTypeOPT:
+		noff2 := noff + 10
+		for _, opt := range rr.OPT {
+			binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code))
+			binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data)))
+			copy(data[noff2+4:], opt.Data)
+			noff2 += 4 + len(opt.Data)
+		}
+	default:
+		return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type)
+	}
+
+	// DataLength
+	dSz := recSize(rr)
+	binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz))
+
+	if opts.FixLengths {
+		rr.DataLength = uint16(dSz)
+	}
+
+	return nSz + 10 + dSz, nil
+}
+
+func (rr *DNSResourceRecord) String() string {
+
+	if rr.Type == DNSTypeOPT {
+		opts := make([]string, len(rr.OPT))
+		for i, opt := range rr.OPT {
+			opts[i] = opt.String()
+		}
+		return "OPT " + strings.Join(opts, ",")
+	}
+	if rr.Class == DNSClassIN {
+		switch rr.Type {
+		case DNSTypeA, DNSTypeAAAA:
+			return rr.IP.String()
+		case DNSTypeNS:
+			return "NS " + string(rr.NS)
+		case DNSTypeCNAME:
+			return "CNAME " + string(rr.CNAME)
+		case DNSTypePTR:
+			return "PTR " + string(rr.PTR)
+		case DNSTypeTXT:
+			return "TXT " + string(rr.TXT)
+		}
+	}
+
+	return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type)
+}
+
+func decodeCharacterStrings(data []byte) ([][]byte, error) {
+	strings := make([][]byte, 0, 1)
+	end := len(data)
+	for index, index2 := 0, 0; index != end; index = index2 {
+		index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow
+		if index2 > end {
+			return nil, errCharStringMissData
+		}
+		strings = append(strings, data[index+1:index2])
+	}
+	return strings, nil
+}
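+
+// exampleDecodeTXTStrings is an illustrative sketch, not part of the upstream
+// gopacket sources: TXT RDATA is a sequence of length-prefixed
+// character-strings, and this decodes two of them ("foo" and "hello"). The
+// sample bytes and the function name are hypothetical.
+func exampleDecodeTXTStrings() ([][]byte, error) {
+	rdata := []byte{3, 'f', 'o', 'o', 5, 'h', 'e', 'l', 'l', 'o'}
+	return decodeCharacterStrings(rdata)
+}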
+
+func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) {
+	allOPT := []DNSOPT{}
+	end := len(data)
+
+	if offset == end {
+		return allOPT, nil // There is no data to read
+	}
+
+	if offset+4 > end {
+		return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset)
+	}
+
+	for i := offset; i < end; {
+		opt := DNSOPT{}
+		opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2]))
+		l := binary.BigEndian.Uint16(data[i+2 : i+4])
+		if i+4+int(l) > end {
+			return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l)
+		}
+		opt.Data = data[i+4 : i+4+int(l)]
+		allOPT = append(allOPT, opt)
+		i += int(l) + 4
+	}
+	return allOPT, nil
+}
+
+func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error {
+	switch rr.Type {
+	case DNSTypeA:
+		rr.IP = rr.Data
+	case DNSTypeAAAA:
+		rr.IP = rr.Data
+	case DNSTypeTXT, DNSTypeHINFO:
+		rr.TXT = rr.Data
+		txts, err := decodeCharacterStrings(rr.Data)
+		if err != nil {
+			return err
+		}
+		rr.TXTs = txts
+	case DNSTypeNS:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.NS = name
+	case DNSTypeCNAME:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.CNAME = name
+	case DNSTypePTR:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.PTR = name
+	case DNSTypeSOA:
+		name, endq, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.SOA.MName = name
+		name, endq, err = decodeName(data, endq, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.SOA.RName = name
+		rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4])
+		rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+		rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12])
+		rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16])
+		rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20])
+	case DNSTypeMX:
+		rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2])
+		name, _, err := decodeName(data, offset+2, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.MX.Name = name
+	case DNSTypeSRV:
+		rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2])
+		rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6])
+		name, _, err := decodeName(data, offset+6, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.SRV.Name = name
+	case DNSTypeOPT:
+		allOPT, err := decodeOPTs(data, offset)
+		if err != nil {
+			return err
+		}
+		rr.OPT = allOPT
+	}
+	return nil
+}
+
+// DNSSOA is a Start of Authority record.  Each domain requires a SOA record
+// at the zone cut where the domain is delegated from its parent.
+type DNSSOA struct {
+	MName, RName                            []byte
+	Serial, Refresh, Retry, Expire, Minimum uint32
+}
+
+// DNSSRV is a Service record, defining a location (hostname/port) of a
+// server/service.
+type DNSSRV struct {
+	Priority, Weight, Port uint16
+	Name                   []byte
+}
+
+// DNSMX is a mail exchange record, defining a mail server for a recipient's
+// domain.
+type DNSMX struct {
+	Preference uint16
+	Name       []byte
+}
+
+// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2
+type DNSOptionCode uint16
+
+func (doc DNSOptionCode) String() string {
+	switch doc {
+	default:
+		return "Unknown"
+	case DNSOptionCodeNSID:
+		return "NSID"
+	case DNSOptionCodeDAU:
+		return "DAU"
+	case DNSOptionCodeDHU:
+		return "DHU"
+	case DNSOptionCodeN3U:
+		return "N3U"
+	case DNSOptionCodeEDNSClientSubnet:
+		return "EDNSClientSubnet"
+	case DNSOptionCodeEDNSExpire:
+		return "EDNSExpire"
+	case DNSOptionCodeCookie:
+		return "Cookie"
+	case DNSOptionCodeEDNSKeepAlive:
+		return "EDNSKeepAlive"
+	case DNSOptionCodePadding:
+		return "CodePadding"
+	case DNSOptionCodeChain:
+		return "CodeChain"
+	case DNSOptionCodeEDNSKeyTag:
+		return "CodeEDNSKeyTag"
+	case DNSOptionCodeEDNSClientTag:
+		return "EDNSClientTag"
+	case DNSOptionCodeEDNSServerTag:
+		return "EDNSServerTag"
+	case DNSOptionCodeDeviceID:
+		return "DeviceID"
+	}
+}
+
+// DNSOptionCode known values. See IANA
+const (
+	DNSOptionCodeNSID             DNSOptionCode = 3
+	DNSOptionCodeDAU              DNSOptionCode = 5
+	DNSOptionCodeDHU              DNSOptionCode = 6
+	DNSOptionCodeN3U              DNSOptionCode = 7
+	DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8
+	DNSOptionCodeEDNSExpire       DNSOptionCode = 9
+	DNSOptionCodeCookie           DNSOptionCode = 10
+	DNSOptionCodeEDNSKeepAlive    DNSOptionCode = 11
+	DNSOptionCodePadding          DNSOptionCode = 12
+	DNSOptionCodeChain            DNSOptionCode = 13
+	DNSOptionCodeEDNSKeyTag       DNSOptionCode = 14
+	DNSOptionCodeEDNSClientTag    DNSOptionCode = 16
+	DNSOptionCodeEDNSServerTag    DNSOptionCode = 17
+	DNSOptionCodeDeviceID         DNSOptionCode = 26946
+)
+
+// DNSOPT is a DNS Option, see RFC6891, section 6.1.2
+type DNSOPT struct {
+	Code DNSOptionCode
+	Data []byte
+}
+
+func (opt DNSOPT) String() string {
+	return fmt.Sprintf("%s=%x", opt.Code, opt.Data)
+}
+
+var (
+	errMaxRecursion = errors.New("max DNS recursion level hit")
+
+	errDNSNameOffsetTooHigh    = errors.New("dns name offset too high")
+	errDNSNameOffsetNegative   = errors.New("dns name offset is negative")
+	errDNSPacketTooShort       = errors.New("DNS packet too short")
+	errDNSNameTooLong          = errors.New("dns name is too long")
+	errDNSNameInvalidIndex     = errors.New("dns name uncomputable: invalid index")
+	errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high")
+	errDNSIndexOutOfRange      = errors.New("dns index walked out of range")
+	errDNSNameHasNoData        = errors.New("no dns data found for name")
+
+	errCharStringMissData = errors.New("Insufficient data for a <character-string>")
+
+	errDecodeRecordLength = errors.New("resource record length exceeds data")
+
+	errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions")
+	errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers")
+	errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities")
+	errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additional records")
+)
diff --git a/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/google/gopacket/layers/doc.go
new file mode 100644
index 0000000..3c882c3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/doc.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package layers provides decoding layers for many common protocols.
+
+The layers package contains decode implementations for a number of different
+types of packet layers.  Users of gopacket will almost always want to also use
+layers to actually decode packet data into useful pieces. To see the set of
+protocols that gopacket/layers is currently able to decode,
+look at the set of LayerTypes defined in the Variables sections. The
+layers package also defines endpoints for many of the common packet layers
+that have source/destination addresses associated with them, for example IPv4/6
+(IPs) and TCP/UDP (ports).
+Finally, layers contains a number of useful enumerations (IPProtocol,
+EthernetType, LinkType, PPPType, etc...).  Many of these implement the
+gopacket.Decoder interface, so they can be passed into gopacket as decoders.
+
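+As a rough, illustrative sketch of how these pieces fit together, decoding a
+raw Ethernet frame and pulling out its TCP layer can look like this (rawBytes
+is just a stand-in for captured packet bytes, not something this package
+defines):
+
+  packet := gopacket.NewPacket(rawBytes, LayerTypeEthernet, gopacket.Default)
+  if tcpLayer := packet.Layer(LayerTypeTCP); tcpLayer != nil {
+    tcp := tcpLayer.(*TCP)
+    fmt.Printf("TCP from source port %v", tcp.SrcPort)
+  }
+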
+Most common protocol layers are named using acronyms or other industry-common
+names (IPv4, TCP, PPP).  Some of the less common ones have their names expanded
+(CiscoDiscoveryProtocol).
+For certain protocols, sub-parts of the protocol are split out into their own
+layers (SCTP, for example).  This is done mostly in cases where portions of the
+protocol may fulfill the capabilities of interesting layers (SCTPData implements
+ApplicationLayer, while base SCTP implements TransportLayer), or possibly
+because splitting a protocol into a few layers makes decoding easier.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket.
+
+Port Types
+
+Instead of using raw uint16 or uint8 values for ports, we use a different port
+type for every protocol, for example TCPPort and UDPPort.  This allows us to
+override string behavior for each port, which we do by setting up port name
+maps (TCPPortNames, UDPPortNames, etc...).  Well-known ports are annotated with
+their protocol names, and their String function displays these names:
+
+  p := TCPPort(80)
+  fmt.Printf("Number: %d  String: %v", p, p)
+  // Prints: "Number: 80  String: 80(http)"
+
+Modifying Decode Behavior
+
+layers links together decoding through its enumerations.  For example, after
+decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder.
+All enumerations that act as decoders, like EthernetType, can be modified by
+users depending on their preferences.  For example, if you have a spiffy new
+IPv4 decoder that works way better than the one built into layers, you can do
+this:
+
+ var mySpiffyIPv4Decoder gopacket.Decoder = ...
+ layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder
+
+This will make all future ethernet packets use your new decoder to decode IPv4
+packets, instead of the built-in decoder used by gopacket.
+*/
+package layers
diff --git a/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/google/gopacket/layers/dot11.go
new file mode 100644
index 0000000..3843d70
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot11.go
@@ -0,0 +1,2105 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on
+// all of the layers in this file.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control
+// header, all in one place.
+type Dot11Flags uint8
+
+const (
+	Dot11FlagsToDS Dot11Flags = 1 << iota
+	Dot11FlagsFromDS
+	Dot11FlagsMF
+	Dot11FlagsRetry
+	Dot11FlagsPowerManagement
+	Dot11FlagsMD
+	Dot11FlagsWEP
+	Dot11FlagsOrder
+)
+
+func (d Dot11Flags) ToDS() bool {
+	return d&Dot11FlagsToDS != 0
+}
+func (d Dot11Flags) FromDS() bool {
+	return d&Dot11FlagsFromDS != 0
+}
+func (d Dot11Flags) MF() bool {
+	return d&Dot11FlagsMF != 0
+}
+func (d Dot11Flags) Retry() bool {
+	return d&Dot11FlagsRetry != 0
+}
+func (d Dot11Flags) PowerManagement() bool {
+	return d&Dot11FlagsPowerManagement != 0
+}
+func (d Dot11Flags) MD() bool {
+	return d&Dot11FlagsMD != 0
+}
+func (d Dot11Flags) WEP() bool {
+	return d&Dot11FlagsWEP != 0
+}
+func (d Dot11Flags) Order() bool {
+	return d&Dot11FlagsOrder != 0
+}
+
+// String provides a human readable string for Dot11Flags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Flags value, not its string.
+func (a Dot11Flags) String() string {
+	var out bytes.Buffer
+	if a.ToDS() {
+		out.WriteString("TO-DS,")
+	}
+	if a.FromDS() {
+		out.WriteString("FROM-DS,")
+	}
+	if a.MF() {
+		out.WriteString("MF,")
+	}
+	if a.Retry() {
+		out.WriteString("Retry,")
+	}
+	if a.PowerManagement() {
+		out.WriteString("PowerManagement,")
+	}
+	if a.MD() {
+		out.WriteString("MD,")
+	}
+	if a.WEP() {
+		out.WriteString("WEP,")
+	}
+	if a.Order() {
+		out.WriteString("Order,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+// Dot11Reason is the reason code carried in 802.11 deauthentication and
+// disassociation frames.
+type Dot11Reason uint16
+
+// TODO: Verify these reasons, and append more reasons if necessary.
+
+const (
+	Dot11ReasonReserved          Dot11Reason = 1
+	Dot11ReasonUnspecified       Dot11Reason = 2
+	Dot11ReasonAuthExpired       Dot11Reason = 3
+	Dot11ReasonDeauthStLeaving   Dot11Reason = 4
+	Dot11ReasonInactivity        Dot11Reason = 5
+	Dot11ReasonApFull            Dot11Reason = 6
+	Dot11ReasonClass2FromNonAuth Dot11Reason = 7
+	Dot11ReasonClass3FromNonAss  Dot11Reason = 8
+	Dot11ReasonDisasStLeaving    Dot11Reason = 9
+	Dot11ReasonStNotAuth         Dot11Reason = 10
+)
+
+// String provides a human readable string for Dot11Reason.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Reason value, not its string.
+func (a Dot11Reason) String() string {
+	switch a {
+	case Dot11ReasonReserved:
+		return "Reserved"
+	case Dot11ReasonUnspecified:
+		return "Unspecified"
+	case Dot11ReasonAuthExpired:
+		return "Auth. expired"
+	case Dot11ReasonDeauthStLeaving:
+		return "Deauth. st. leaving"
+	case Dot11ReasonInactivity:
+		return "Inactivity"
+	case Dot11ReasonApFull:
+		return "Ap. full"
+	case Dot11ReasonClass2FromNonAuth:
+		return "Class2 from non auth."
+	case Dot11ReasonClass3FromNonAss:
+		return "Class3 from non ass."
+	case Dot11ReasonDisasStLeaving:
+		return "Disass st. leaving"
+	case Dot11ReasonStNotAuth:
+		return "St. not auth."
+	default:
+		return "Unknown reason"
+	}
+}
+
+// Dot11Status is the status code carried in 802.11 management response
+// frames, such as authentication and association responses.
+type Dot11Status uint16
+
+const (
+	Dot11StatusSuccess                      Dot11Status = 0
+	Dot11StatusFailure                      Dot11Status = 1  // Unspecified failure
+	Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field
+	Dot11StatusInabilityExistsAssociation   Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists
+	Dot11StatusAssociationDenied            Dot11Status = 12 // Association denied due to reason outside the scope of this standard
+	Dot11StatusAlgorithmUnsupported         Dot11Status = 13 // Responding station does not support the specified authentication algorithm
+	Dot11StatusOufOfExpectedSequence        Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence
+	Dot11StatusChallengeFailure             Dot11Status = 15 // Authentication rejected because of challenge failure
+	Dot11StatusTimeout                      Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence
+	Dot11StatusAPUnableToHandle             Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations
+	Dot11StatusRateUnsupported              Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter
+)
+
+// String provides a human readable string for Dot11Status.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Status value, not its string.
+func (a Dot11Status) String() string {
+	switch a {
+	case Dot11StatusSuccess:
+		return "success"
+	case Dot11StatusFailure:
+		return "failure"
+	case Dot11StatusCannotSupportAllCapabilities:
+		return "cannot-support-all-capabilities"
+	case Dot11StatusInabilityExistsAssociation:
+		return "inability-exists-association"
+	case Dot11StatusAssociationDenied:
+		return "association-denied"
+	case Dot11StatusAlgorithmUnsupported:
+		return "algorithm-unsupported"
+	case Dot11StatusOufOfExpectedSequence:
+		return "out-of-expected-sequence"
+	case Dot11StatusChallengeFailure:
+		return "challenge-failure"
+	case Dot11StatusTimeout:
+		return "timeout"
+	case Dot11StatusAPUnableToHandle:
+		return "ap-unable-to-handle"
+	case Dot11StatusRateUnsupported:
+		return "rate-unsupported"
+	default:
+		return "unknown status"
+	}
+}
+
+// Dot11AckPolicy is the acknowledgement policy carried in the QoS control
+// field of an 802.11 QoS frame.
+type Dot11AckPolicy uint8
+
+const (
+	Dot11AckPolicyNormal     Dot11AckPolicy = 0
+	Dot11AckPolicyNone       Dot11AckPolicy = 1
+	Dot11AckPolicyNoExplicit Dot11AckPolicy = 2
+	Dot11AckPolicyBlock      Dot11AckPolicy = 3
+)
+
+// String provides a human readable string for Dot11AckPolicy.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11AckPolicy value, not its string.
+func (a Dot11AckPolicy) String() string {
+	switch a {
+	case Dot11AckPolicyNormal:
+		return "normal-ack"
+	case Dot11AckPolicyNone:
+		return "no-ack"
+	case Dot11AckPolicyNoExplicit:
+		return "no-explicit-ack"
+	case Dot11AckPolicyBlock:
+		return "block-ack"
+	default:
+		return "unknown-ack-policy"
+	}
+}
+
+// Dot11Algorithm identifies the authentication algorithm used in 802.11
+// authentication frames.
+type Dot11Algorithm uint16
+
+const (
+	Dot11AlgorithmOpen      Dot11Algorithm = 0
+	Dot11AlgorithmSharedKey Dot11Algorithm = 1
+)
+
+// String provides a human readable string for Dot11Algorithm.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Algorithm value, not its string.
+func (a Dot11Algorithm) String() string {
+	switch a {
+	case Dot11AlgorithmOpen:
+		return "open"
+	case Dot11AlgorithmSharedKey:
+		return "shared-key"
+	default:
+		return "unknown-algorithm"
+	}
+}
+
+// Dot11InformationElementID identifies the type of an 802.11 information
+// element.
+type Dot11InformationElementID uint8
+
+const (
+	Dot11InformationElementIDSSID                      Dot11InformationElementID = 0
+	Dot11InformationElementIDRates                     Dot11InformationElementID = 1
+	Dot11InformationElementIDFHSet                     Dot11InformationElementID = 2
+	Dot11InformationElementIDDSSet                     Dot11InformationElementID = 3
+	Dot11InformationElementIDCFSet                     Dot11InformationElementID = 4
+	Dot11InformationElementIDTIM                       Dot11InformationElementID = 5
+	Dot11InformationElementIDIBSSSet                   Dot11InformationElementID = 6
+	Dot11InformationElementIDCountryInfo               Dot11InformationElementID = 7
+	Dot11InformationElementIDHoppingPatternParam       Dot11InformationElementID = 8
+	Dot11InformationElementIDHoppingPatternTable       Dot11InformationElementID = 9
+	Dot11InformationElementIDRequest                   Dot11InformationElementID = 10
+	Dot11InformationElementIDQBSSLoadElem              Dot11InformationElementID = 11
+	Dot11InformationElementIDEDCAParamSet              Dot11InformationElementID = 12
+	Dot11InformationElementIDTrafficSpec               Dot11InformationElementID = 13
+	Dot11InformationElementIDTrafficClass              Dot11InformationElementID = 14
+	Dot11InformationElementIDSchedule                  Dot11InformationElementID = 15
+	Dot11InformationElementIDChallenge                 Dot11InformationElementID = 16
+	Dot11InformationElementIDPowerConst                Dot11InformationElementID = 32
+	Dot11InformationElementIDPowerCapability           Dot11InformationElementID = 33
+	Dot11InformationElementIDTPCRequest                Dot11InformationElementID = 34
+	Dot11InformationElementIDTPCReport                 Dot11InformationElementID = 35
+	Dot11InformationElementIDSupportedChannels         Dot11InformationElementID = 36
+	Dot11InformationElementIDSwitchChannelAnnounce     Dot11InformationElementID = 37
+	Dot11InformationElementIDMeasureRequest            Dot11InformationElementID = 38
+	Dot11InformationElementIDMeasureReport             Dot11InformationElementID = 39
+	Dot11InformationElementIDQuiet                     Dot11InformationElementID = 40
+	Dot11InformationElementIDIBSSDFS                   Dot11InformationElementID = 41
+	Dot11InformationElementIDERPInfo                   Dot11InformationElementID = 42
+	Dot11InformationElementIDTSDelay                   Dot11InformationElementID = 43
+	Dot11InformationElementIDTCLASProcessing           Dot11InformationElementID = 44
+	Dot11InformationElementIDHTCapabilities            Dot11InformationElementID = 45
+	Dot11InformationElementIDQOSCapability             Dot11InformationElementID = 46
+	Dot11InformationElementIDERPInfo2                  Dot11InformationElementID = 47
+	Dot11InformationElementIDRSNInfo                   Dot11InformationElementID = 48
+	Dot11InformationElementIDESRates                   Dot11InformationElementID = 50
+	Dot11InformationElementIDAPChannelReport           Dot11InformationElementID = 51
+	Dot11InformationElementIDNeighborReport            Dot11InformationElementID = 52
+	Dot11InformationElementIDRCPI                      Dot11InformationElementID = 53
+	Dot11InformationElementIDMobilityDomain            Dot11InformationElementID = 54
+	Dot11InformationElementIDFastBSSTrans              Dot11InformationElementID = 55
+	Dot11InformationElementIDTimeoutInt                Dot11InformationElementID = 56
+	Dot11InformationElementIDRICData                   Dot11InformationElementID = 57
+	Dot11InformationElementIDDSERegisteredLoc          Dot11InformationElementID = 58
+	Dot11InformationElementIDSuppOperatingClass        Dot11InformationElementID = 59
+	Dot11InformationElementIDExtChanSwitchAnnounce     Dot11InformationElementID = 60
+	Dot11InformationElementIDHTInfo                    Dot11InformationElementID = 61
+	Dot11InformationElementIDSecChanOffset             Dot11InformationElementID = 62
+	Dot11InformationElementIDBSSAverageAccessDelay     Dot11InformationElementID = 63
+	Dot11InformationElementIDAntenna                   Dot11InformationElementID = 64
+	Dot11InformationElementIDRSNI                      Dot11InformationElementID = 65
+	Dot11InformationElementIDMeasurePilotTrans         Dot11InformationElementID = 66
+	Dot11InformationElementIDBSSAvailAdmCapacity       Dot11InformationElementID = 67
+	Dot11InformationElementIDBSSACAccDelayWAPIParam    Dot11InformationElementID = 68
+	Dot11InformationElementIDTimeAdvertisement         Dot11InformationElementID = 69
+	Dot11InformationElementIDRMEnabledCapabilities     Dot11InformationElementID = 70
+	Dot11InformationElementIDMultipleBSSID             Dot11InformationElementID = 71
+	Dot11InformationElementID2040BSSCoExist            Dot11InformationElementID = 72
+	Dot11InformationElementID2040BSSIntChanReport      Dot11InformationElementID = 73
+	Dot11InformationElementIDOverlapBSSScanParam       Dot11InformationElementID = 74
+	Dot11InformationElementIDRICDescriptor             Dot11InformationElementID = 75
+	Dot11InformationElementIDManagementMIC             Dot11InformationElementID = 76
+	Dot11InformationElementIDEventRequest              Dot11InformationElementID = 78
+	Dot11InformationElementIDEventReport               Dot11InformationElementID = 79
+	Dot11InformationElementIDDiagnosticRequest         Dot11InformationElementID = 80
+	Dot11InformationElementIDDiagnosticReport          Dot11InformationElementID = 81
+	Dot11InformationElementIDLocationParam             Dot11InformationElementID = 82
+	Dot11InformationElementIDNonTransBSSIDCapability   Dot11InformationElementID = 83
+	Dot11InformationElementIDSSIDList                  Dot11InformationElementID = 84
+	Dot11InformationElementIDMultipleBSSIDIndex        Dot11InformationElementID = 85
+	Dot11InformationElementIDFMSDescriptor             Dot11InformationElementID = 86
+	Dot11InformationElementIDFMSRequest                Dot11InformationElementID = 87
+	Dot11InformationElementIDFMSResponse               Dot11InformationElementID = 88
+	Dot11InformationElementIDQOSTrafficCapability      Dot11InformationElementID = 89
+	Dot11InformationElementIDBSSMaxIdlePeriod          Dot11InformationElementID = 90
+	Dot11InformationElementIDTFSRequest                Dot11InformationElementID = 91
+	Dot11InformationElementIDTFSResponse               Dot11InformationElementID = 92
+	Dot11InformationElementIDWNMSleepMode              Dot11InformationElementID = 93
+	Dot11InformationElementIDTIMBroadcastRequest       Dot11InformationElementID = 94
+	Dot11InformationElementIDTIMBroadcastResponse      Dot11InformationElementID = 95
+	Dot11InformationElementIDCollInterferenceReport    Dot11InformationElementID = 96
+	Dot11InformationElementIDChannelUsage              Dot11InformationElementID = 97
+	Dot11InformationElementIDTimeZone                  Dot11InformationElementID = 98
+	Dot11InformationElementIDDMSRequest                Dot11InformationElementID = 99
+	Dot11InformationElementIDDMSResponse               Dot11InformationElementID = 100
+	Dot11InformationElementIDLinkIdentifier            Dot11InformationElementID = 101
+	Dot11InformationElementIDWakeupSchedule            Dot11InformationElementID = 102
+	Dot11InformationElementIDChannelSwitchTiming       Dot11InformationElementID = 104
+	Dot11InformationElementIDPTIControl                Dot11InformationElementID = 105
+	Dot11InformationElementIDPUBufferStatus            Dot11InformationElementID = 106
+	Dot11InformationElementIDInterworking              Dot11InformationElementID = 107
+	Dot11InformationElementIDAdvertisementProtocol     Dot11InformationElementID = 108
+	Dot11InformationElementIDExpBWRequest              Dot11InformationElementID = 109
+	Dot11InformationElementIDQOSMapSet                 Dot11InformationElementID = 110
+	Dot11InformationElementIDRoamingConsortium         Dot11InformationElementID = 111
+	Dot11InformationElementIDEmergencyAlertIdentifier  Dot11InformationElementID = 112
+	Dot11InformationElementIDMeshConfiguration         Dot11InformationElementID = 113
+	Dot11InformationElementIDMeshID                    Dot11InformationElementID = 114
+	Dot11InformationElementIDMeshLinkMetricReport      Dot11InformationElementID = 115
+	Dot11InformationElementIDCongestionNotification    Dot11InformationElementID = 116
+	Dot11InformationElementIDMeshPeeringManagement     Dot11InformationElementID = 117
+	Dot11InformationElementIDMeshChannelSwitchParam    Dot11InformationElementID = 118
+	Dot11InformationElementIDMeshAwakeWindows          Dot11InformationElementID = 119
+	Dot11InformationElementIDBeaconTiming              Dot11InformationElementID = 120
+	Dot11InformationElementIDMCCAOPSetupRequest        Dot11InformationElementID = 121
+	Dot11InformationElementIDMCCAOPSetupReply          Dot11InformationElementID = 122
+	Dot11InformationElementIDMCCAOPAdvertisement       Dot11InformationElementID = 123
+	Dot11InformationElementIDMCCAOPTeardown            Dot11InformationElementID = 124
+	Dot11InformationElementIDGateAnnouncement          Dot11InformationElementID = 125
+	Dot11InformationElementIDRootAnnouncement          Dot11InformationElementID = 126
+	Dot11InformationElementIDExtCapability             Dot11InformationElementID = 127
+	Dot11InformationElementIDAgereProprietary          Dot11InformationElementID = 128
+	Dot11InformationElementIDPathRequest               Dot11InformationElementID = 130
+	Dot11InformationElementIDPathReply                 Dot11InformationElementID = 131
+	Dot11InformationElementIDPathError                 Dot11InformationElementID = 132
+	Dot11InformationElementIDCiscoCCX1CKIPDeviceName   Dot11InformationElementID = 133
+	Dot11InformationElementIDCiscoCCX2                 Dot11InformationElementID = 136
+	Dot11InformationElementIDProxyUpdate               Dot11InformationElementID = 137
+	Dot11InformationElementIDProxyUpdateConfirmation   Dot11InformationElementID = 138
+	Dot11InformationElementIDAuthMeshPerringExch       Dot11InformationElementID = 139
+	Dot11InformationElementIDMIC                       Dot11InformationElementID = 140
+	Dot11InformationElementIDDestinationURI            Dot11InformationElementID = 141
+	Dot11InformationElementIDUAPSDCoexistence          Dot11InformationElementID = 142
+	Dot11InformationElementIDWakeupSchedule80211ad     Dot11InformationElementID = 143
+	Dot11InformationElementIDExtendedSchedule          Dot11InformationElementID = 144
+	Dot11InformationElementIDSTAAvailability           Dot11InformationElementID = 145
+	Dot11InformationElementIDDMGTSPEC                  Dot11InformationElementID = 146
+	Dot11InformationElementIDNextDMGATI                Dot11InformationElementID = 147
+	Dot11InformationElementIDDMSCapabilities           Dot11InformationElementID = 148
+	Dot11InformationElementIDCiscoUnknown95            Dot11InformationElementID = 149
+	Dot11InformationElementIDVendor2                   Dot11InformationElementID = 150
+	Dot11InformationElementIDDMGOperating              Dot11InformationElementID = 151
+	Dot11InformationElementIDDMGBSSParamChange         Dot11InformationElementID = 152
+	Dot11InformationElementIDDMGBeamRefinement         Dot11InformationElementID = 153
+	Dot11InformationElementIDChannelMeasFeedback       Dot11InformationElementID = 154
+	Dot11InformationElementIDAwakeWindow               Dot11InformationElementID = 157
+	Dot11InformationElementIDMultiBand                 Dot11InformationElementID = 158
+	Dot11InformationElementIDADDBAExtension            Dot11InformationElementID = 159
+	Dot11InformationElementIDNEXTPCPList               Dot11InformationElementID = 160
+	Dot11InformationElementIDPCPHandover               Dot11InformationElementID = 161
+	Dot11InformationElementIDDMGLinkMargin             Dot11InformationElementID = 162
+	Dot11InformationElementIDSwitchingStream           Dot11InformationElementID = 163
+	Dot11InformationElementIDSessionTransmission       Dot11InformationElementID = 164
+	Dot11InformationElementIDDynamicTonePairReport     Dot11InformationElementID = 165
+	Dot11InformationElementIDClusterReport             Dot11InformationElementID = 166
+	Dot11InformationElementIDRelayCapabilities         Dot11InformationElementID = 167
+	Dot11InformationElementIDRelayTransferParameter    Dot11InformationElementID = 168
+	Dot11InformationElementIDBeamlinkMaintenance       Dot11InformationElementID = 169
+	Dot11InformationElementIDMultipleMacSublayers      Dot11InformationElementID = 170
+	Dot11InformationElementIDUPID                      Dot11InformationElementID = 171
+	Dot11InformationElementIDDMGLinkAdaptionAck        Dot11InformationElementID = 172
+	Dot11InformationElementIDSymbolProprietary         Dot11InformationElementID = 173
+	Dot11InformationElementIDMCCAOPAdvertOverview      Dot11InformationElementID = 174
+	Dot11InformationElementIDQuietPeriodRequest        Dot11InformationElementID = 175
+	Dot11InformationElementIDQuietPeriodResponse       Dot11InformationElementID = 177
+	Dot11InformationElementIDECPACPolicy               Dot11InformationElementID = 182
+	Dot11InformationElementIDClusterTimeOffset         Dot11InformationElementID = 183
+	Dot11InformationElementIDAntennaSectorID           Dot11InformationElementID = 190
+	Dot11InformationElementIDVHTCapabilities           Dot11InformationElementID = 191
+	Dot11InformationElementIDVHTOperation              Dot11InformationElementID = 192
+	Dot11InformationElementIDExtendedBSSLoad           Dot11InformationElementID = 193
+	Dot11InformationElementIDWideBWChannelSwitch       Dot11InformationElementID = 194
+	Dot11InformationElementIDVHTTxPowerEnvelope        Dot11InformationElementID = 195
+	Dot11InformationElementIDChannelSwitchWrapper      Dot11InformationElementID = 196
+	Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199
+	Dot11InformationElementIDUPSIM                     Dot11InformationElementID = 200
+	Dot11InformationElementIDReducedNeighborReport     Dot11InformationElementID = 201
+	Dot11InformationElementIDTVHTOperation             Dot11InformationElementID = 202
+	Dot11InformationElementIDDeviceLocation            Dot11InformationElementID = 204
+	Dot11InformationElementIDWhiteSpaceMap             Dot11InformationElementID = 205
+	Dot11InformationElementIDFineTuningMeasureParams   Dot11InformationElementID = 206
+	Dot11InformationElementIDVendor                    Dot11InformationElementID = 221
+)
+
+// String provides a human readable string for Dot11InformationElementID.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11InformationElementID value,
+// not its string.
+func (a Dot11InformationElementID) String() string {
+	switch a {
+	case Dot11InformationElementIDSSID:
+		return "SSID parameter set"
+	case Dot11InformationElementIDRates:
+		return "Supported Rates"
+	case Dot11InformationElementIDFHSet:
+		return "FH Parameter set"
+	case Dot11InformationElementIDDSSet:
+		return "DS Parameter set"
+	case Dot11InformationElementIDCFSet:
+		return "CF Parameter set"
+	case Dot11InformationElementIDTIM:
+		return "Traffic Indication Map (TIM)"
+	case Dot11InformationElementIDIBSSSet:
+		return "IBSS Parameter set"
+	case Dot11InformationElementIDCountryInfo:
+		return "Country Information"
+	case Dot11InformationElementIDHoppingPatternParam:
+		return "Hopping Pattern Parameters"
+	case Dot11InformationElementIDHoppingPatternTable:
+		return "Hopping Pattern Table"
+	case Dot11InformationElementIDRequest:
+		return "Request"
+	case Dot11InformationElementIDQBSSLoadElem:
+		return "QBSS Load Element"
+	case Dot11InformationElementIDEDCAParamSet:
+		return "EDCA Parameter Set"
+	case Dot11InformationElementIDTrafficSpec:
+		return "Traffic Specification"
+	case Dot11InformationElementIDTrafficClass:
+		return "Traffic Classification"
+	case Dot11InformationElementIDSchedule:
+		return "Schedule"
+	case Dot11InformationElementIDChallenge:
+		return "Challenge text"
+	case Dot11InformationElementIDPowerConst:
+		return "Power Constraint"
+	case Dot11InformationElementIDPowerCapability:
+		return "Power Capability"
+	case Dot11InformationElementIDTPCRequest:
+		return "TPC Request"
+	case Dot11InformationElementIDTPCReport:
+		return "TPC Report"
+	case Dot11InformationElementIDSupportedChannels:
+		return "Supported Channels"
+	case Dot11InformationElementIDSwitchChannelAnnounce:
+		return "Channel Switch Announcement"
+	case Dot11InformationElementIDMeasureRequest:
+		return "Measurement Request"
+	case Dot11InformationElementIDMeasureReport:
+		return "Measurement Report"
+	case Dot11InformationElementIDQuiet:
+		return "Quiet"
+	case Dot11InformationElementIDIBSSDFS:
+		return "IBSS DFS"
+	case Dot11InformationElementIDERPInfo:
+		return "ERP Information"
+	case Dot11InformationElementIDTSDelay:
+		return "TS Delay"
+	case Dot11InformationElementIDTCLASProcessing:
+		return "TCLAS Processing"
+	case Dot11InformationElementIDHTCapabilities:
+		return "HT Capabilities (802.11n D1.10)"
+	case Dot11InformationElementIDQOSCapability:
+		return "QOS Capability"
+	case Dot11InformationElementIDERPInfo2:
+		return "ERP Information-2"
+	case Dot11InformationElementIDRSNInfo:
+		return "RSN Information"
+	case Dot11InformationElementIDESRates:
+		return "Extended Supported Rates"
+	case Dot11InformationElementIDAPChannelReport:
+		return "AP Channel Report"
+	case Dot11InformationElementIDNeighborReport:
+		return "Neighbor Report"
+	case Dot11InformationElementIDRCPI:
+		return "RCPI"
+	case Dot11InformationElementIDMobilityDomain:
+		return "Mobility Domain"
+	case Dot11InformationElementIDFastBSSTrans:
+		return "Fast BSS Transition"
+	case Dot11InformationElementIDTimeoutInt:
+		return "Timeout Interval"
+	case Dot11InformationElementIDRICData:
+		return "RIC Data"
+	case Dot11InformationElementIDDSERegisteredLoc:
+		return "DSE Registered Location"
+	case Dot11InformationElementIDSuppOperatingClass:
+		return "Supported Operating Classes"
+	case Dot11InformationElementIDExtChanSwitchAnnounce:
+		return "Extended Channel Switch Announcement"
+	case Dot11InformationElementIDHTInfo:
+		return "HT Information (802.11n D1.10)"
+	case Dot11InformationElementIDSecChanOffset:
+		return "Secondary Channel Offset (802.11n D1.10)"
+	case Dot11InformationElementIDBSSAverageAccessDelay:
+		return "BSS Average Access Delay"
+	case Dot11InformationElementIDAntenna:
+		return "Antenna"
+	case Dot11InformationElementIDRSNI:
+		return "RSNI"
+	case Dot11InformationElementIDMeasurePilotTrans:
+		return "Measurement Pilot Transmission"
+	case Dot11InformationElementIDBSSAvailAdmCapacity:
+		return "BSS Available Admission Capacity"
+	case Dot11InformationElementIDBSSACAccDelayWAPIParam:
+		return "BSS AC Access Delay/WAPI Parameter Set"
+	case Dot11InformationElementIDTimeAdvertisement:
+		return "Time Advertisement"
+	case Dot11InformationElementIDRMEnabledCapabilities:
+		return "RM Enabled Capabilities"
+	case Dot11InformationElementIDMultipleBSSID:
+		return "Multiple BSSID"
+	case Dot11InformationElementID2040BSSCoExist:
+		return "20/40 BSS Coexistence"
+	case Dot11InformationElementID2040BSSIntChanReport:
+		return "20/40 BSS Intolerant Channel Report"
+	case Dot11InformationElementIDOverlapBSSScanParam:
+		return "Overlapping BSS Scan Parameters"
+	case Dot11InformationElementIDRICDescriptor:
+		return "RIC Descriptor"
+	case Dot11InformationElementIDManagementMIC:
+		return "Management MIC"
+	case Dot11InformationElementIDEventRequest:
+		return "Event Request"
+	case Dot11InformationElementIDEventReport:
+		return "Event Report"
+	case Dot11InformationElementIDDiagnosticRequest:
+		return "Diagnostic Request"
+	case Dot11InformationElementIDDiagnosticReport:
+		return "Diagnostic Report"
+	case Dot11InformationElementIDLocationParam:
+		return "Location Parameters"
+	case Dot11InformationElementIDNonTransBSSIDCapability:
+		return "Non Transmitted BSSID Capability"
+	case Dot11InformationElementIDSSIDList:
+		return "SSID List"
+	case Dot11InformationElementIDMultipleBSSIDIndex:
+		return "Multiple BSSID Index"
+	case Dot11InformationElementIDFMSDescriptor:
+		return "FMS Descriptor"
+	case Dot11InformationElementIDFMSRequest:
+		return "FMS Request"
+	case Dot11InformationElementIDFMSResponse:
+		return "FMS Response"
+	case Dot11InformationElementIDQOSTrafficCapability:
+		return "QoS Traffic Capability"
+	case Dot11InformationElementIDBSSMaxIdlePeriod:
+		return "BSS Max Idle Period"
+	case Dot11InformationElementIDTFSRequest:
+		return "TFS Request"
+	case Dot11InformationElementIDTFSResponse:
+		return "TFS Response"
+	case Dot11InformationElementIDWNMSleepMode:
+		return "WNM-Sleep Mode"
+	case Dot11InformationElementIDTIMBroadcastRequest:
+		return "TIM Broadcast Request"
+	case Dot11InformationElementIDTIMBroadcastResponse:
+		return "TIM Broadcast Response"
+	case Dot11InformationElementIDCollInterferenceReport:
+		return "Collocated Interference Report"
+	case Dot11InformationElementIDChannelUsage:
+		return "Channel Usage"
+	case Dot11InformationElementIDTimeZone:
+		return "Time Zone"
+	case Dot11InformationElementIDDMSRequest:
+		return "DMS Request"
+	case Dot11InformationElementIDDMSResponse:
+		return "DMS Response"
+	case Dot11InformationElementIDLinkIdentifier:
+		return "Link Identifier"
+	case Dot11InformationElementIDWakeupSchedule:
+		return "Wakeup Schedule"
+	case Dot11InformationElementIDChannelSwitchTiming:
+		return "Channel Switch Timing"
+	case Dot11InformationElementIDPTIControl:
+		return "PTI Control"
+	case Dot11InformationElementIDPUBufferStatus:
+		return "PU Buffer Status"
+	case Dot11InformationElementIDInterworking:
+		return "Interworking"
+	case Dot11InformationElementIDAdvertisementProtocol:
+		return "Advertisement Protocol"
+	case Dot11InformationElementIDExpBWRequest:
+		return "Expedited Bandwidth Request"
+	case Dot11InformationElementIDQOSMapSet:
+		return "QoS Map Set"
+	case Dot11InformationElementIDRoamingConsortium:
+		return "Roaming Consortium"
+	case Dot11InformationElementIDEmergencyAlertIdentifier:
+		return "Emergency Alert Identifier"
+	case Dot11InformationElementIDMeshConfiguration:
+		return "Mesh Configuration"
+	case Dot11InformationElementIDMeshID:
+		return "Mesh ID"
+	case Dot11InformationElementIDMeshLinkMetricReport:
+		return "Mesh Link Metric Report"
+	case Dot11InformationElementIDCongestionNotification:
+		return "Congestion Notification"
+	case Dot11InformationElementIDMeshPeeringManagement:
+		return "Mesh Peering Management"
+	case Dot11InformationElementIDMeshChannelSwitchParam:
+		return "Mesh Channel Switch Parameters"
+	case Dot11InformationElementIDMeshAwakeWindows:
+		return "Mesh Awake Windows"
+	case Dot11InformationElementIDBeaconTiming:
+		return "Beacon Timing"
+	case Dot11InformationElementIDMCCAOPSetupRequest:
+		return "MCCAOP Setup Request"
+	case Dot11InformationElementIDMCCAOPSetupReply:
+		return "MCCAOP Setup Reply"
+	case Dot11InformationElementIDMCCAOPAdvertisement:
+		return "MCCAOP Advertisement"
+	case Dot11InformationElementIDMCCAOPTeardown:
+		return "MCCAOP Teardown"
+	case Dot11InformationElementIDGateAnnouncement:
+		return "Gate Announcement"
+	case Dot11InformationElementIDRootAnnouncement:
+		return "Root Announcement"
+	case Dot11InformationElementIDExtCapability:
+		return "Extended Capabilities"
+	case Dot11InformationElementIDAgereProprietary:
+		return "Agere Proprietary"
+	case Dot11InformationElementIDPathRequest:
+		return "Path Request"
+	case Dot11InformationElementIDPathReply:
+		return "Path Reply"
+	case Dot11InformationElementIDPathError:
+		return "Path Error"
+	case Dot11InformationElementIDCiscoCCX1CKIPDeviceName:
+		return "Cisco CCX1 CKIP + Device Name"
+	case Dot11InformationElementIDCiscoCCX2:
+		return "Cisco CCX2"
+	case Dot11InformationElementIDProxyUpdate:
+		return "Proxy Update"
+	case Dot11InformationElementIDProxyUpdateConfirmation:
+		return "Proxy Update Confirmation"
+	case Dot11InformationElementIDAuthMeshPerringExch:
+		return "Authenticated Mesh Peering Exchange"
+	case Dot11InformationElementIDMIC:
+		return "MIC (Message Integrity Code)"
+	case Dot11InformationElementIDDestinationURI:
+		return "Destination URI"
+	case Dot11InformationElementIDUAPSDCoexistence:
+		return "U-APSD Coexistence"
+	case Dot11InformationElementIDWakeupSchedule80211ad:
+		return "Wakeup Schedule 802.11ad"
+	case Dot11InformationElementIDExtendedSchedule:
+		return "Extended Schedule"
+	case Dot11InformationElementIDSTAAvailability:
+		return "STA Availability"
+	case Dot11InformationElementIDDMGTSPEC:
+		return "DMG TSPEC"
+	case Dot11InformationElementIDNextDMGATI:
+		return "Next DMG ATI"
+	case Dot11InformationElementIDDMSCapabilities:
+		return "DMG Capabilities"
+	case Dot11InformationElementIDCiscoUnknown95:
+		return "Cisco Unknown 95"
+	case Dot11InformationElementIDVendor2:
+		return "Vendor Specific"
+	case Dot11InformationElementIDDMGOperating:
+		return "DMG Operating"
+	case Dot11InformationElementIDDMGBSSParamChange:
+		return "DMG BSS Parameter Change"
+	case Dot11InformationElementIDDMGBeamRefinement:
+		return "DMG Beam Refinement"
+	case Dot11InformationElementIDChannelMeasFeedback:
+		return "Channel Measurement Feedback"
+	case Dot11InformationElementIDAwakeWindow:
+		return "Awake Window"
+	case Dot11InformationElementIDMultiBand:
+		return "Multi Band"
+	case Dot11InformationElementIDADDBAExtension:
+		return "ADDBA Extension"
+	case Dot11InformationElementIDNEXTPCPList:
+		return "NEXTPCP List"
+	case Dot11InformationElementIDPCPHandover:
+		return "PCP Handover"
+	case Dot11InformationElementIDDMGLinkMargin:
+		return "DMG Link Margin"
+	case Dot11InformationElementIDSwitchingStream:
+		return "Switching Stream"
+	case Dot11InformationElementIDSessionTransmission:
+		return "Session Transmission"
+	case Dot11InformationElementIDDynamicTonePairReport:
+		return "Dynamic Tone Pairing Report"
+	case Dot11InformationElementIDClusterReport:
+		return "Cluster Report"
+	case Dot11InformationElementIDRelayCapabilities:
+		return "Relay Capabilities"
+	case Dot11InformationElementIDRelayTransferParameter:
+		return "Relay Transfer Parameter"
+	case Dot11InformationElementIDBeamlinkMaintenance:
+		return "Beamlink Maintenance"
+	case Dot11InformationElementIDMultipleMacSublayers:
+		return "Multiple MAC Sublayers"
+	case Dot11InformationElementIDUPID:
+		return "U-PID"
+	case Dot11InformationElementIDDMGLinkAdaptionAck:
+		return "DMG Link Adaptation Acknowledgment"
+	case Dot11InformationElementIDSymbolProprietary:
+		return "Symbol Proprietary"
+	case Dot11InformationElementIDMCCAOPAdvertOverview:
+		return "MCCAOP Advertisement Overview"
+	case Dot11InformationElementIDQuietPeriodRequest:
+		return "Quiet Period Request"
+	case Dot11InformationElementIDQuietPeriodResponse:
+		return "Quiet Period Response"
+	case Dot11InformationElementIDECPACPolicy:
+		return "ECPAC Policy"
+	case Dot11InformationElementIDClusterTimeOffset:
+		return "Cluster Time Offset"
+	case Dot11InformationElementIDAntennaSectorID:
+		return "Antenna Sector ID"
+	case Dot11InformationElementIDVHTCapabilities:
+		return "VHT Capabilities (IEEE Std 802.11ac/D3.1)"
+	case Dot11InformationElementIDVHTOperation:
+		return "VHT Operation (IEEE Std 802.11ac/D3.1)"
+	case Dot11InformationElementIDExtendedBSSLoad:
+		return "Extended BSS Load"
+	case Dot11InformationElementIDWideBWChannelSwitch:
+		return "Wide Bandwidth Channel Switch"
+	case Dot11InformationElementIDVHTTxPowerEnvelope:
+		return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)"
+	case Dot11InformationElementIDChannelSwitchWrapper:
+		return "Channel Switch Wrapper"
+	case Dot11InformationElementIDOperatingModeNotification:
+		return "Operating Mode Notification"
+	case Dot11InformationElementIDUPSIM:
+		return "UP SIM"
+	case Dot11InformationElementIDReducedNeighborReport:
+		return "Reduced Neighbor Report"
+	case Dot11InformationElementIDTVHTOperation:
+		return "TVHT Op"
+	case Dot11InformationElementIDDeviceLocation:
+		return "Device Location"
+	case Dot11InformationElementIDWhiteSpaceMap:
+		return "White Space Map"
+	case Dot11InformationElementIDFineTuningMeasureParams:
+		return "Fine Tuning Measure Parameters"
+	case Dot11InformationElementIDVendor:
+		return "Vendor"
+	default:
+		return "Unknown information element id"
+	}
+}
+
+// Dot11 provides an IEEE 802.11 base packet header.
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html
+// for excruciating detail.
+type Dot11 struct {
+	BaseLayer
+	Type           Dot11Type
+	Proto          uint8
+	Flags          Dot11Flags
+	DurationID     uint16
+	Address1       net.HardwareAddr
+	Address2       net.HardwareAddr
+	Address3       net.HardwareAddr
+	Address4       net.HardwareAddr
+	SequenceNumber uint16
+	FragmentNumber uint16
+	Checksum       uint32
+	QOS            *Dot11QOS
+	HTControl      *Dot11HTControl
+	DataLayer      gopacket.Layer
+}
+
+// Dot11QOS represents the QoS control field of an 802.11 QoS frame.
+type Dot11QOS struct {
+	TID       uint8 /* Traffic IDentifier */
+	EOSP      bool  /* End of service period */
+	AckPolicy Dot11AckPolicy
+	TXOP      uint8
+}
+
+// Dot11HTControl represents the HT control field of an 802.11 frame, in
+// either its HT or VHT variant.
+type Dot11HTControl struct {
+	ACConstraint bool
+	RDGMorePPDU  bool
+
+	VHT *Dot11HTControlVHT
+	HT  *Dot11HTControlHT
+}
+
+type Dot11HTControlHT struct {
+	LinkAdapationControl *Dot11LinkAdapationControl
+	CalibrationPosition  uint8
+	CalibrationSequence  uint8
+	CSISteering          uint8
+	NDPAnnouncement      bool
+	DEI                  bool
+}
+
+type Dot11HTControlVHT struct {
+	MRQ            bool
+	UnsolicitedMFB bool
+	MSI            *uint8
+	MFB            Dot11HTControlMFB
+	CompressedMSI  *uint8
+	STBCIndication bool
+	MFSI           *uint8
+	GID            *uint8
+	CodingType     *Dot11CodingType
+	FbTXBeamformed bool
+}
+
+type Dot11HTControlMFB struct {
+	NumSTS uint8
+	VHTMCS uint8
+	BW     uint8
+	SNR    int8
+}
+
+type Dot11LinkAdapationControl struct {
+	TRQ  bool
+	MRQ  bool
+	MSI  uint8
+	MFSI uint8
+	ASEL *Dot11ASEL
+	MFB  *uint8
+}
+
+type Dot11ASEL struct {
+	Command uint8
+	Data    uint8
+}
+
+type Dot11CodingType uint8
+
+const (
+	Dot11CodingTypeBCC  = 0
+	Dot11CodingTypeLDPC = 1
+)
+
+func (a Dot11CodingType) String() string {
+	switch a {
+	case Dot11CodingTypeBCC:
+		return "BCC"
+	case Dot11CodingTypeLDPC:
+		return "LDPC"
+	default:
+		return "Unknown coding type"
+	}
+}
+
+func (m *Dot11HTControlMFB) NoFeedBackPresent() bool {
+	return m.VHTMCS == 15 && m.NumSTS == 7
+}
+
+func decodeDot11(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	if d.DataLayer != nil {
+		p.AddLayer(d.DataLayer)
+	}
+	return p.NextDecoder(d.NextLayerType())
+}
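+
+// A rough usage sketch (frameBytes stands in for a []byte holding a raw
+// 802.11 frame; it is not defined by this package):
+//
+//   packet := gopacket.NewPacket(frameBytes, LayerTypeDot11, gopacket.Default)
+//   if dot11Layer := packet.Layer(LayerTypeDot11); dot11Layer != nil {
+//       dot11 := dot11Layer.(*Dot11)
+//       fmt.Println(dot11.Type, dot11.Address1, dot11.Address2)
+//   }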
+
+func (m *Dot11) LayerType() gopacket.LayerType  { return LayerTypeDot11 }
+func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 }
+func (m *Dot11) NextLayerType() gopacket.LayerType {
+	if m.DataLayer != nil {
+		if m.Flags.WEP() {
+			return LayerTypeDot11WEP
+		}
+		return m.DataLayer.(gopacket.DecodingLayer).NextLayerType()
+	}
+	return m.Type.LayerType()
+}
+
+func createU8(x uint8) *uint8 {
+	return &x
+}
+
+var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{
+	Dot11TypeData:                   func() gopacket.DecodingLayer { return &Dot11Data{} },
+	Dot11TypeDataCFAck:              func() gopacket.DecodingLayer { return &Dot11DataCFAck{} },
+	Dot11TypeDataCFPoll:             func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} },
+	Dot11TypeDataCFAckPoll:          func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} },
+	Dot11TypeDataNull:               func() gopacket.DecodingLayer { return &Dot11DataNull{} },
+	Dot11TypeDataCFAckNoData:        func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} },
+	Dot11TypeDataCFPollNoData:       func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} },
+	Dot11TypeDataCFAckPollNoData:    func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} },
+	Dot11TypeDataQOSData:            func() gopacket.DecodingLayer { return &Dot11DataQOSData{} },
+	Dot11TypeDataQOSDataCFAck:       func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} },
+	Dot11TypeDataQOSDataCFPoll:      func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} },
+	Dot11TypeDataQOSDataCFAckPoll:   func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAckPoll{} },
+	Dot11TypeDataQOSNull:            func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} },
+	Dot11TypeDataQOSCFPollNoData:    func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} },
+	Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} },
+}
+
+func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 10 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10)
+	}
+	m.Type = Dot11Type((data[0])&0xFC) >> 2
+
+	m.Proto = uint8(data[0]) & 0x0003
+	m.Flags = Dot11Flags(data[1])
+	m.DurationID = binary.LittleEndian.Uint16(data[2:4])
+	m.Address1 = net.HardwareAddr(data[4:10])
+
+	offset := 10
+
+	mainType := m.Type.MainType()
+
+	switch mainType {
+	case Dot11TypeCtrl:
+		switch m.Type {
+		case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+			if len(data) < offset+6 {
+				df.SetTruncated()
+				return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+			}
+			m.Address2 = net.HardwareAddr(data[offset : offset+6])
+			offset += 6
+		}
+	case Dot11TypeMgmt, Dot11TypeData:
+		if len(data) < offset+14 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14)
+		}
+		m.Address2 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+		m.Address3 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+
+		m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4
+		m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F)
+		offset += 2
+	}
+
+	if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+		if len(data) < offset+6 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+		}
+		m.Address4 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+	}
+
+	if m.Type.QOS() {
+		if len(data) < offset+2 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+2)
+		}
+		m.QOS = &Dot11QOS{
+			TID:       (uint8(data[offset]) & 0x0F),
+			EOSP:      (uint8(data[offset]) & 0x10) == 0x10,
+			AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5),
+			TXOP:      uint8(data[offset+1]),
+		}
+		offset += 2
+	}
+	if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) {
+		if len(data) < offset+4 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4)
+		}
+
+		htc := &Dot11HTControl{
+			ACConstraint: data[offset+3]&0x40 != 0,
+			RDGMorePPDU:  data[offset+3]&0x80 != 0,
+		}
+		m.HTControl = htc
+
+		if data[offset]&0x1 != 0 { // VHT Variant
+			vht := &Dot11HTControlVHT{}
+			htc.VHT = vht
+			vht.MRQ = data[offset]&0x4 != 0
+			vht.UnsolicitedMFB = data[offset+3]&0x20 != 0
+			vht.MFB = Dot11HTControlMFB{
+				NumSTS: uint8(data[offset+1] >> 1 & 0x7),
+				VHTMCS: uint8(data[offset+1] >> 4 & 0xF),
+				BW:     uint8(data[offset+2] & 0x3),
+				SNR:    int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22,
+			}
+
+			if vht.UnsolicitedMFB {
+				if !vht.MFB.NoFeedBackPresent() {
+					vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3)
+					vht.STBCIndication = data[offset]&0x20 != 0
+					vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1))
+					vht.FbTXBeamformed = data[offset+3]&0x10 != 0
+					vht.GID = createU8(
+						data[offset]>>6 +
+							(data[offset+1] & 0x1 << 2) +
+							data[offset+3]&0x7<<3)
+				}
+			} else {
+				if vht.MRQ {
+					vht.MSI = createU8((data[offset] >> 3) & 0x07)
+				}
+				vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2))
+			}
+
+		} else { // HT Variant
+			ht := &Dot11HTControlHT{}
+			htc.HT = ht
+
+			lac := &Dot11LinkAdapationControl{}
+			ht.LinkAdapationControl = lac
+			lac.TRQ = data[offset]&0x2 != 0
+			lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3
+			if data[offset]&0x3C == 0x38 { // ASEL
+				lac.ASEL = &Dot11ASEL{
+					Command: data[offset+1] >> 1 & 0x7,
+					Data:    data[offset+1] >> 4 & 0xF,
+				}
+			} else {
+				lac.MRQ = data[offset]&0x4 != 0
+				if lac.MRQ {
+					lac.MSI = data[offset] >> 3 & 0x7
+				}
+				lac.MFB = createU8(data[offset+1] >> 1)
+			}
+			ht.CalibrationPosition = data[offset+2] & 0x3
+			ht.CalibrationSequence = data[offset+2] >> 2 & 0x3
+			ht.CSISteering = data[offset+2] >> 6 & 0x3
+			ht.NDPAnnouncement = data[offset+3]&0x1 != 0
+			if mainType != Dot11TypeMgmt {
+				ht.DEI = data[offset+3]&0x20 != 0
+			}
+		}
+
+		offset += 4
+	}
+
+	if len(data) < offset+4 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4)
+	}
+
+	m.BaseLayer = BaseLayer{
+		Contents: data[0:offset],
+		Payload:  data[offset : len(data)-4],
+	}
+
+	if mainType == Dot11TypeData {
+		l := dataDecodeMap[m.Type]()
+		err := l.DecodeFromBytes(m.BaseLayer.Payload, df)
+		if err != nil {
+			return err
+		}
+		m.DataLayer = l.(gopacket.Layer)
+	}
+
+	m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)])
+	return nil
+}
+
+// ChecksumValid reports whether the frame check sequence matches the frame
+// contents.  This is only meaningful for CTRL and MGMT frames.
+func (m *Dot11) ChecksumValid() bool {
+	h := crc32.NewIEEE()
+	h.Write(m.Contents)
+	h.Write(m.Payload)
+	return m.Checksum == h.Sum32()
+}
+
+func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(24)
+
+	if err != nil {
+		return err
+	}
+
+	buf[0] = (uint8(m.Type) << 2) | m.Proto
+	buf[1] = uint8(m.Flags)
+
+	binary.LittleEndian.PutUint16(buf[2:4], m.DurationID)
+
+	copy(buf[4:10], m.Address1)
+
+	offset := 10
+
+	switch m.Type.MainType() {
+	case Dot11TypeCtrl:
+		switch m.Type {
+		case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+			copy(buf[offset:offset+6], m.Address2)
+			offset += 6
+		}
+	case Dot11TypeMgmt, Dot11TypeData:
+		copy(buf[offset:offset+6], m.Address2)
+		offset += 6
+		copy(buf[offset:offset+6], m.Address3)
+		offset += 6
+
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber)
+		offset += 2
+	}
+
+	if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+		copy(buf[offset:offset+6], m.Address4)
+		offset += 6
+	}
+
+	return nil
+}
+
+// Dot11Mgmt is a base for all IEEE 802.11 management layers.
+type Dot11Mgmt struct {
+	BaseLayer
+}
+
+func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+// Dot11Ctrl is a base for all IEEE 802.11 control layers.
+type Dot11Ctrl struct {
+	BaseLayer
+}
+
+func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11Ctrl) LayerType() gopacket.LayerType  { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11Ctrl{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11WEP contains WEP encrypted IEEE 802.11 data.
+type Dot11WEP struct {
+	BaseLayer
+}
+
+func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11WEP) LayerType() gopacket.LayerType  { return LayerTypeDot11WEP }
+func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP }
+func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11WEP{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11Data is a base for all IEEE 802.11 data layers.
+type Dot11Data struct {
+	BaseLayer
+}
+
+func (m *Dot11Data) NextLayerType() gopacket.LayerType {
+	return LayerTypeLLC
+}
+
+func (m *Dot11Data) LayerType() gopacket.LayerType  { return LayerTypeDot11Data }
+func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data }
+func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Payload = data
+	return nil
+}
+
+func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11Data{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type Dot11DataCFAck struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAck) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPoll struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPoll struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataNull struct {
+	Dot11Data
+}
+
+func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataNull{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataNull) LayerType() gopacket.LayerType  { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPollNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFPollNoData }
+func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPollNoData }
+func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPollNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataQOS struct {
+	Dot11Ctrl
+}
+
+func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.BaseLayer = BaseLayer{Payload: data}
+	return nil
+}
+
+type Dot11DataQOSData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSData) LayerType() gopacket.LayerType  { return LayerTypeDot11DataQOSData }
+func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData }
+
+func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11Data
+}
+
+type Dot11DataQOSDataCFAck struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType     { return LayerTypeDot11DataQOSDataCFAck }
+func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass    { return LayerTypeDot11DataQOSDataCFAck }
+func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }
+
+type Dot11DataQOSDataCFPoll struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }
+
+type Dot11DataQOSDataCFAckPoll struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFAckPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPoll
+}
+
+type Dot11DataQOSNull struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSNull{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType     { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass    { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull }
+
+type Dot11DataQOSCFPollNoData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSCFPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFPollNoData
+}
+
+type Dot11DataQOSCFAckPollNoData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSCFAckPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+
+type Dot11InformationElement struct {
+	BaseLayer
+	ID     Dot11InformationElementID
+	Length uint8
+	OUI    []byte
+	Info   []byte
+}
+
+func (m *Dot11InformationElement) LayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2)
+	}
+	m.ID = Dot11InformationElementID(data[0])
+	m.Length = data[1]
+	offset := int(2)
+
+	if len(data) < offset+int(m.Length) {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length))
+	}
+	if m.ID == 221 {
+		// Vendor extension
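+		// (For ID 221 the first four info bytes are kept as OUI: the 3-byte
+		// OUI followed by the vendor-specific type byte.)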
+		m.OUI = data[offset : offset+4]
+		m.Info = data[offset+4 : offset+int(m.Length)]
+	} else {
+		m.Info = data[offset : offset+int(m.Length)]
+	}
+
+	offset += int(m.Length)
+
+	m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+	return nil
+}
+
+func (d *Dot11InformationElement) String() string {
+	if d.ID == 0 {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info))
+	} else if d.ID == 1 {
+		rates := ""
+		for i := 0; i < len(d.Info); i++ {
+			if d.Info[i]&0x80 == 0 {
+				rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5)
+			} else {
+				rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5)
+			}
+		}
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates)
+	} else if d.ID == 221 {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info)
+	} else {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info)
+	}
+}
+
+func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := len(m.Info) + len(m.OUI)
+	if buf, err := b.PrependBytes(2 + length); err != nil {
+		return err
+	} else {
+		buf[0] = uint8(m.ID)
+		buf[1] = uint8(length)
+		copy(buf[2:], m.OUI)
+		copy(buf[2+len(m.OUI):], m.Info)
+	}
+	return nil
+}
+
+func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11InformationElement{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type Dot11CtrlCTS struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCTS{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlRTS struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlRTS{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAckReq struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlBlockAckReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlBlockAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType  { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlPowersavePoll struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlPowersavePoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlAck) LayerType() gopacket.LayerType  { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEnd struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCFEnd{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEndAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCFEndAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11MgmtAssociationReq struct {
+	Dot11Mgmt
+	CapabilityInfo uint16
+	ListenInterval uint16
+}
+
+func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAssociationReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+	m.Payload = data[4:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(4)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+	return nil
+}
+
+type Dot11MgmtAssociationResp struct {
+	Dot11Mgmt
+	CapabilityInfo uint16
+	Status         Dot11Status
+	AID            uint16
+}
+
+func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAssociationResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 6 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4]))
+	m.AID = binary.LittleEndian.Uint16(data[4:6])
+	m.Payload = data[6:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(6)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status))
+	binary.LittleEndian.PutUint16(buf[4:6], m.AID)
+
+	return nil
+}
+
+type Dot11MgmtReassociationReq struct {
+	Dot11Mgmt
+	CapabilityInfo   uint16
+	ListenInterval   uint16
+	CurrentApAddress net.HardwareAddr
+}
+
+func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtReassociationReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 10 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+	m.CurrentApAddress = net.HardwareAddr(data[4:10])
+	m.Payload = data[10:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(10)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+	copy(buf[4:10], m.CurrentApAddress)
+
+	return nil
+}
+
+type Dot11MgmtReassociationResp struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtReassociationResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationResp) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeReq struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtProbeReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeResp struct {
+	Dot11Mgmt
+	Timestamp uint64
+	Interval  uint16
+	Flags     uint16
+}
+
+func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtProbeResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+
+		return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12)
+	}
+
+	m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+	m.Interval = binary.LittleEndian.Uint16(data[8:10])
+	m.Flags = binary.LittleEndian.Uint16(data[10:12])
+	m.Payload = data[12:]
+
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(12)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+	binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+	binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+	return nil
+}
+
+type Dot11MgmtMeasurementPilot struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtMeasurementPilot{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtMeasurementPilot
+}
+func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtMeasurementPilot
+}
+
+type Dot11MgmtBeacon struct {
+	Dot11Mgmt
+	Timestamp uint64
+	Interval  uint16
+	Flags     uint16
+}
+
+func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtBeacon{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12)
+	}
+	m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+	m.Interval = binary.LittleEndian.Uint16(data[8:10])
+	m.Flags = binary.LittleEndian.Uint16(data[10:12])
+	m.Payload = data[12:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement }
+
+func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(12)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+	binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+	binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+	return nil
+}
+
+type Dot11MgmtATIM struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtATIM{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtATIM }
+func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM }
+
+type Dot11MgmtDisassociation struct {
+	Dot11Mgmt
+	Reason Dot11Reason
+}
+
+func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtDisassociation{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2)
+	}
+	m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(2)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+	return nil
+}
+
+type Dot11MgmtAuthentication struct {
+	Dot11Mgmt
+	Algorithm Dot11Algorithm
+	Sequence  uint16
+	Status    Dot11Status
+}
+
+func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAuthentication{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 6 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6)
+	}
+	m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2]))
+	m.Sequence = binary.LittleEndian.Uint16(data[2:4])
+	m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6]))
+	m.Payload = data[6:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(6)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm))
+	binary.LittleEndian.PutUint16(buf[2:4], m.Sequence)
+	binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status))
+
+	return nil
+}
+
+type Dot11MgmtDeauthentication struct {
+	Dot11Mgmt
+	Reason Dot11Reason
+}
+
+func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtDeauthentication{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2)
+	}
+	m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(2)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+	return nil
+}
+
+type Dot11MgmtAction struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAction{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAction) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtAction }
+func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction }
+
+type Dot11MgmtActionNoAck struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtActionNoAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtActionNoAck }
+func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck }
+
+type Dot11MgmtArubaWLAN struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtArubaWLAN{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtArubaWLAN }
+func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN }
diff --git a/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/google/gopacket/layers/dot1q.go
new file mode 100644
index 0000000..47f93d7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot1q.go
@@ -0,0 +1,71 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// Dot1Q is the packet layer for 802.1Q VLAN headers.
+type Dot1Q struct {
+	BaseLayer
+	Priority       uint8
+	DropEligible   bool
+	VLANIdentifier uint16
+	Type           EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeDot1Q
+func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot1Q length %v too short, %v required", len(data), 4)
+	}
+	d.Priority = (data[0] & 0xE0) >> 5
+	d.DropEligible = data[0]&0x10 != 0
+	d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF
+	d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *Dot1Q) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot1Q
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *Dot1Q) NextLayerType() gopacket.LayerType {
+	return d.Type.LayerType()
+}
+
+func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot1Q{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	if d.VLANIdentifier > 0xFFF {
+		return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier)
+	}
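+	// TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits), followed by
+	// the EtherType of the encapsulated frame.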
+	firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier
+	if d.DropEligible {
+		firstBytes |= 0x1000
+	}
+	binary.BigEndian.PutUint16(bytes, firstBytes)
+	binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type))
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/google/gopacket/layers/eap.go
new file mode 100644
index 0000000..250f857
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eap.go
@@ -0,0 +1,106 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+type EAPCode uint8
+type EAPType uint8
+
+const (
+	EAPCodeRequest  EAPCode = 1
+	EAPCodeResponse EAPCode = 2
+	EAPCodeSuccess  EAPCode = 3
+	EAPCodeFailure  EAPCode = 4
+
+	// EAPTypeNone means that this EAP layer has no Type or TypeData.
+	// Success and Failure EAPs will have this set.
+	EAPTypeNone EAPType = 0
+
+	EAPTypeIdentity     EAPType = 1
+	EAPTypeNotification EAPType = 2
+	EAPTypeNACK         EAPType = 3
+	EAPTypeOTP          EAPType = 4
+	EAPTypeTokenCard    EAPType = 5
+)
+
+// EAP defines an Extensible Authentication Protocol (rfc 3748) layer.
+type EAP struct {
+	BaseLayer
+	Code     EAPCode
+	Id       uint8
+	Length   uint16
+	Type     EAPType
+	TypeData []byte
+}
+
+// LayerType returns LayerTypeEAP.
+func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
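+	// RFC 3748 EAP packet layout: Code (1) + Identifier (1) + Length (2),
+	// followed by Type (1) and Type-Data for Request/Response packets;
+	// Length covers the entire packet including this 4-byte header.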
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("EAP length %v too short, %v required", len(data), 4)
+	}
+	e.Code = EAPCode(data[0])
+	e.Id = data[1]
+	e.Length = binary.BigEndian.Uint16(data[2:4])
+	switch {
+	case e.Length > 4:
+		e.Type = EAPType(data[4])
+		e.TypeData = data[5:]
+	case e.Length == 4:
+		e.Type = 0
+		e.TypeData = nil
+	default:
+		return fmt.Errorf("invalid EAP length %d", e.Length)
+	}
+	e.BaseLayer.Contents = data[:e.Length]
+	e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		e.Length = uint16(len(e.TypeData) + 1)
+	}
+	size := len(e.TypeData) + 4
+	if size > 4 {
+		size++
+	}
+	bytes, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(e.Code)
+	bytes[1] = e.Id
+	binary.BigEndian.PutUint16(bytes[2:], e.Length)
+	if size > 4 {
+		bytes[4] = byte(e.Type)
+		copy(bytes[5:], e.TypeData)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAP) CanDecode() gopacket.LayerClass {
+	return LayerTypeEAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+func decodeEAP(data []byte, p gopacket.PacketBuilder) error {
+	e := &EAP{}
+	return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go
new file mode 100644
index 0000000..12aa5ba
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eapol.go
@@ -0,0 +1,298 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// EAPOL defines an EAP over LAN (802.1x) layer.
+type EAPOL struct {
+	BaseLayer
+	Version uint8
+	Type    EAPOLType
+	Length  uint16
+}
+
+// LayerType returns LayerTypeEAPOL.
+func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("EAPOL length %v too short, %v required", len(data), 4)
+	}
+	e.Version = data[0]
+	e.Type = EAPOLType(data[1])
+	e.Length = binary.BigEndian.Uint16(data[2:4])
+	e.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer
+func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = e.Version
+	bytes[1] = byte(e.Type)
+	binary.BigEndian.PutUint16(bytes[2:], e.Length)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAPOL) CanDecode() gopacket.LayerClass {
+	return LayerTypeEAPOL
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAPOL) NextLayerType() gopacket.LayerType {
+	return e.Type.LayerType()
+}
+
+func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error {
+	e := &EAPOL{}
+	return decodingLayerDecoder(e, data, p)
+}
+
+// EAPOLKeyDescriptorType is an enumeration of key descriptor types
+// as specified by 802.1x in the EAPOL-Key frame
+type EAPOLKeyDescriptorType uint8
+
+// Enumeration of EAPOLKeyDescriptorType
+const (
+	EAPOLKeyDescriptorTypeRC4   EAPOLKeyDescriptorType = 1
+	EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2
+	EAPOLKeyDescriptorTypeWPA   EAPOLKeyDescriptorType = 254
+)
+
+func (kdt EAPOLKeyDescriptorType) String() string {
+	switch kdt {
+	case EAPOLKeyDescriptorTypeRC4:
+		return "RC4"
+	case EAPOLKeyDescriptorTypeDot11:
+		return "802.11"
+	case EAPOLKeyDescriptorTypeWPA:
+		return "WPA"
+	default:
+		return fmt.Sprintf("unknown descriptor type %d", kdt)
+	}
+}
+
+// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the
+// encryption algorithm for the key data and the authentication for the
+// message integrity code (MIC)
+type EAPOLKeyDescriptorVersion uint8
+
+// Enumeration of EAPOLKeyDescriptorVersion
+const (
+	EAPOLKeyDescriptorVersionOther       EAPOLKeyDescriptorVersion = 0
+	EAPOLKeyDescriptorVersionRC4HMACMD5  EAPOLKeyDescriptorVersion = 1
+	EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2
+	EAPOLKeyDescriptorVersionAES128CMAC  EAPOLKeyDescriptorVersion = 3
+)
+
+func (v EAPOLKeyDescriptorVersion) String() string {
+	switch v {
+	case EAPOLKeyDescriptorVersionOther:
+		return "Other"
+	case EAPOLKeyDescriptorVersionRC4HMACMD5:
+		return "RC4-HMAC-MD5"
+	case EAPOLKeyDescriptorVersionAESHMACSHA1:
+		return "AES-HMAC-SHA1-128"
+	case EAPOLKeyDescriptorVersionAES128CMAC:
+		return "AES-128-CMAC"
+	default:
+		return fmt.Sprintf("unknown version %d", v)
+	}
+}
+
+// EAPOLKeyType is an enumeration of key derivation types describing
+// the purpose of the keys being derived.
+type EAPOLKeyType uint8
+
+// Enumeration of EAPOLKeyType
+const (
+	EAPOLKeyTypeGroupSMK EAPOLKeyType = 0
+	EAPOLKeyTypePairwise EAPOLKeyType = 1
+)
+
+func (kt EAPOLKeyType) String() string {
+	switch kt {
+	case EAPOLKeyTypeGroupSMK:
+		return "Group/SMK"
+	case EAPOLKeyTypePairwise:
+		return "Pairwise"
+	default:
+		return fmt.Sprintf("unknown key type %d", kt)
+	}
+}
+
+// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication
+type EAPOLKey struct {
+	BaseLayer
+	KeyDescriptorType    EAPOLKeyDescriptorType
+	KeyDescriptorVersion EAPOLKeyDescriptorVersion
+	KeyType              EAPOLKeyType
+	KeyIndex             uint8
+	Install              bool
+	KeyACK               bool
+	KeyMIC               bool
+	Secure               bool
+	MICError             bool
+	Request              bool
+	HasEncryptedKeyData  bool
+	SMKMessage           bool
+	KeyLength            uint16
+	ReplayCounter        uint64
+	Nonce                []byte
+	IV                   []byte
+	RSC                  uint64
+	ID                   uint64
+	MIC                  []byte
+	KeyDataLength        uint16
+	EncryptedKeyData     []byte
+}
+
+// LayerType returns LayerTypeEAPOLKey.
+func (ek *EAPOLKey) LayerType() gopacket.LayerType {
+	return LayerTypeEAPOLKey
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ek *EAPOLKey) CanDecode() gopacket.LayerClass {
+	return LayerTypeEAPOLKey
+}
+
+// NextLayerType returns layers.LayerTypeDot11InformationElement if the key
+// data exists and is unencrypted, otherwise it does not expect a next layer.
+func (ek *EAPOLKey) NextLayerType() gopacket.LayerType {
+	if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 {
+		return LayerTypeDot11InformationElement
+	}
+	return gopacket.LayerTypePayload
+}
+
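+// eapolKeyFrameLen is the fixed EAPOL-Key descriptor size that precedes the
+// variable-length key data: descriptor type (1) + key information (2) +
+// key length (2) + replay counter (8) + nonce (32) + IV (16) + RSC (8) +
+// ID (8) + MIC (16) + key data length (2) = 95 bytes.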
+const eapolKeyFrameLen = 95
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < eapolKeyFrameLen {
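+		// Supported rates are encoded in units of 500 kbit/s; a set MSB
+		// marks a basic (mandatory) rate, rendered with a trailing '*'.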
+		df.SetTruncated()
+		return fmt.Errorf("EAPOLKey length %v too short, %v required",
+			len(data), eapolKeyFrameLen)
+	}
+
+	ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0])
+
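+	// Key Information field: bits 0-2 descriptor version, bit 3 key type,
+	// bits 4-5 key index, then the Install, Key ACK, Key MIC, Secure,
+	// MIC Error, Request, Encrypted Key Data and SMK Message flags.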
+	info := binary.BigEndian.Uint16(data[1:3])
+	ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007)
+	ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3)
+	ek.KeyIndex = uint8((info & 0x0030) >> 4)
+	ek.Install = (info & 0x0040) != 0
+	ek.KeyACK = (info & 0x0080) != 0
+	ek.KeyMIC = (info & 0x0100) != 0
+	ek.Secure = (info & 0x0200) != 0
+	ek.MICError = (info & 0x0400) != 0
+	ek.Request = (info & 0x0800) != 0
+	ek.HasEncryptedKeyData = (info & 0x1000) != 0
+	ek.SMKMessage = (info & 0x2000) != 0
+
+	ek.KeyLength = binary.BigEndian.Uint16(data[3:5])
+	ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13])
+
+	ek.Nonce = data[13:45]
+	ek.IV = data[45:61]
+	ek.RSC = binary.BigEndian.Uint64(data[61:69])
+	ek.ID = binary.BigEndian.Uint64(data[69:77])
+	ek.MIC = data[77:93]
+
+	ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95])
+
+	totalLength := eapolKeyFrameLen + int(ek.KeyDataLength)
+	if len(data) < totalLength {
+		df.SetTruncated()
+		return fmt.Errorf("EAPOLKey data length %d too short, %d required",
+			len(data)-eapolKeyFrameLen, ek.KeyDataLength)
+	}
+
+	if ek.HasEncryptedKeyData {
+		ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength]
+		ek.BaseLayer = BaseLayer{
+			Contents: data[:totalLength],
+			Payload:  data[totalLength:],
+		}
+	} else {
+		ek.BaseLayer = BaseLayer{
+			Contents: data[:eapolKeyFrameLen],
+			Payload:  data[eapolKeyFrameLen:],
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData))
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(ek.KeyDescriptorType)
+
+	var info uint16
+	info |= uint16(ek.KeyDescriptorVersion)
+	info |= uint16(ek.KeyType) << 3
+	info |= uint16(ek.KeyIndex) << 4
+	if ek.Install {
+		info |= 0x0040
+	}
+	if ek.KeyACK {
+		info |= 0x0080
+	}
+	if ek.KeyMIC {
+		info |= 0x0100
+	}
+	if ek.Secure {
+		info |= 0x0200
+	}
+	if ek.MICError {
+		info |= 0x0400
+	}
+	if ek.Request {
+		info |= 0x0800
+	}
+	if ek.HasEncryptedKeyData {
+		info |= 0x1000
+	}
+	if ek.SMKMessage {
+		info |= 0x2000
+	}
+	binary.BigEndian.PutUint16(buf[1:3], info)
+
+	binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength)
+	binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter)
+
+	copy(buf[13:45], ek.Nonce)
+	copy(buf[45:61], ek.IV)
+	binary.BigEndian.PutUint64(buf[61:69], ek.RSC)
+	binary.BigEndian.PutUint64(buf[69:77], ek.ID)
+	copy(buf[77:93], ek.MIC)
+
+	binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength)
+	if len(ek.EncryptedKeyData) > 0 {
+		copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData)
+	}
+
+	return nil
+}
+
+func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error {
+	ek := &EAPOLKey{}
+	return decodingLayerDecoder(ek, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/google/gopacket/layers/endpoints.go
new file mode 100644
index 0000000..4c91cc3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/endpoints.go
@@ -0,0 +1,97 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+	"net"
+	"strconv"
+)
+
+var (
+	// We use two different endpoint types for IPv4 vs IPv6 addresses, so that
+	// ordering with endpointA.LessThan(endpointB) sanely groups all IPv4
+	// addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses.
+	EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string {
+		return net.IP(b).String()
+	}})
+	EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string {
+		return net.IP(b).String()
+	}})
+
+	EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string {
+		return net.HardwareAddr(b).String()
+	}})
+	EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(b[0]))
+	}})
+	EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string {
+		return "point"
+	}})
+)
+
+// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address.
+// It returns gopacket.InvalidEndpoint if the IP address is invalid.
+func NewIPEndpoint(a net.IP) gopacket.Endpoint {
+	ipv4 := a.To4()
+	if ipv4 != nil {
+		return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4))
+	}
+
+	ipv6 := a.To16()
+	if ipv6 != nil {
+		return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6))
+	}
+
+	return gopacket.InvalidEndpoint
+}
+
+// NewMACEndpoint returns a new MAC address endpoint.
+func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointMAC, []byte(a))
+}
+func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint {
+	return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)})
+}
+
+// NewTCPPortEndpoint returns an endpoint based on a TCP port.
+func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointTCPPort, uint16(p))
+}
+
+// NewUDPPortEndpoint returns an endpoint based on a UDP port.
+func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointUDPPort, uint16(p))
+}
+
+// NewSCTPPortEndpoint returns an endpoint based on a SCTP port.
+func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointSCTPPort, uint16(p))
+}
+
+// NewRUDPPortEndpoint returns an endpoint based on a RUDP port.
+func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)})
+}
+
+// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port.
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointUDPLitePort, uint16(p))
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/google/gopacket/layers/enums.go
new file mode 100644
index 0000000..fa443e6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums.go
@@ -0,0 +1,448 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+
+	"github.com/google/gopacket"
+)
+
+// EnumMetadata keeps track of a set of metadata for each enumeration value
+// for protocol enumerations.
+type EnumMetadata struct {
+	// DecodeWith is the decoder to use to decode this protocol's data.
+	DecodeWith gopacket.Decoder
+	// Name is the name of the enumeration value.
+	Name string
+	// LayerType is the layer type implied by the given enum.
+	LayerType gopacket.LayerType
+}
+
+// errorFunc returns a decoder that spits out a specific error message.
+func errorFunc(msg string) gopacket.Decoder {
+	var e = errors.New(msg)
+	return gopacket.DecodeFunc(func([]byte, gopacket.PacketBuilder) error {
+		return e
+	})
+}
+
+// EthernetType is an enumeration of ethernet type values, and acts as a decoder
+// for any type it supports.
+type EthernetType uint16
+
+const (
+	// EthernetTypeLLC is not an actual ethernet type.  It is instead a
+	// placeholder we use in Ethernet frames that use the 802.3 standard of
+	// srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype.
+	EthernetTypeLLC                         EthernetType = 0
+	EthernetTypeIPv4                        EthernetType = 0x0800
+	EthernetTypeARP                         EthernetType = 0x0806
+	EthernetTypeIPv6                        EthernetType = 0x86DD
+	EthernetTypeCiscoDiscovery              EthernetType = 0x2000
+	EthernetTypeNortelDiscovery             EthernetType = 0x01a2
+	EthernetTypeTransparentEthernetBridging EthernetType = 0x6558
+	EthernetTypeDot1Q                       EthernetType = 0x8100
+	EthernetTypePPP                         EthernetType = 0x880b
+	EthernetTypePPPoEDiscovery              EthernetType = 0x8863
+	EthernetTypePPPoESession                EthernetType = 0x8864
+	EthernetTypeMPLSUnicast                 EthernetType = 0x8847
+	EthernetTypeMPLSMulticast               EthernetType = 0x8848
+	EthernetTypeEAPOL                       EthernetType = 0x888e
+	EthernetTypeQinQ                        EthernetType = 0x88a8
+	EthernetTypeLinkLayerDiscovery          EthernetType = 0x88cc
+	EthernetTypeEthernetCTP                 EthernetType = 0x9000
+)
+
+// IPProtocol is an enumeration of IP protocol values, and acts as a decoder
+// for any type it supports.
+type IPProtocol uint8
+
+const (
+	IPProtocolIPv6HopByHop    IPProtocol = 0
+	IPProtocolICMPv4          IPProtocol = 1
+	IPProtocolIGMP            IPProtocol = 2
+	IPProtocolIPv4            IPProtocol = 4
+	IPProtocolTCP             IPProtocol = 6
+	IPProtocolUDP             IPProtocol = 17
+	IPProtocolRUDP            IPProtocol = 27
+	IPProtocolIPv6            IPProtocol = 41
+	IPProtocolIPv6Routing     IPProtocol = 43
+	IPProtocolIPv6Fragment    IPProtocol = 44
+	IPProtocolGRE             IPProtocol = 47
+	IPProtocolESP             IPProtocol = 50
+	IPProtocolAH              IPProtocol = 51
+	IPProtocolICMPv6          IPProtocol = 58
+	IPProtocolNoNextHeader    IPProtocol = 59
+	IPProtocolIPv6Destination IPProtocol = 60
+	IPProtocolOSPF            IPProtocol = 89
+	IPProtocolIPIP            IPProtocol = 94
+	IPProtocolEtherIP         IPProtocol = 97
+	IPProtocolVRRP            IPProtocol = 112
+	IPProtocolSCTP            IPProtocol = 132
+	IPProtocolUDPLite         IPProtocol = 136
+	IPProtocolMPLSInIP        IPProtocol = 137
+)
+
+// LinkType is an enumeration of link types, and acts as a decoder for any
+// link type it supports.
+type LinkType uint8
+
+const (
+	// According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html
+	LinkTypeNull           LinkType = 0
+	LinkTypeEthernet       LinkType = 1
+	LinkTypeAX25           LinkType = 3
+	LinkTypeTokenRing      LinkType = 6
+	LinkTypeArcNet         LinkType = 7
+	LinkTypeSLIP           LinkType = 8
+	LinkTypePPP            LinkType = 9
+	LinkTypeFDDI           LinkType = 10
+	LinkTypePPP_HDLC       LinkType = 50
+	LinkTypePPPEthernet    LinkType = 51
+	LinkTypeATM_RFC1483    LinkType = 100
+	LinkTypeRaw            LinkType = 101
+	LinkTypeC_HDLC         LinkType = 104
+	LinkTypeIEEE802_11     LinkType = 105
+	LinkTypeFRelay         LinkType = 107
+	LinkTypeLoop           LinkType = 108
+	LinkTypeLinuxSLL       LinkType = 113
+	LinkTypeLTalk          LinkType = 114
+	LinkTypePFLog          LinkType = 117
+	LinkTypePrismHeader    LinkType = 119
+	LinkTypeIPOverFC       LinkType = 122
+	LinkTypeSunATM         LinkType = 123
+	LinkTypeIEEE80211Radio LinkType = 127
+	LinkTypeARCNetLinux    LinkType = 129
+	LinkTypeIPOver1394     LinkType = 138
+	LinkTypeMTP2Phdr       LinkType = 139
+	LinkTypeMTP2           LinkType = 140
+	LinkTypeMTP3           LinkType = 141
+	LinkTypeSCCP           LinkType = 142
+	LinkTypeDOCSIS         LinkType = 143
+	LinkTypeLinuxIRDA      LinkType = 144
+	LinkTypeLinuxLAPD      LinkType = 177
+	LinkTypeLinuxUSB       LinkType = 220
+	LinkTypeIPv4           LinkType = 228
+	LinkTypeIPv6           LinkType = 229
+)
+
+// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516
+type PPPoECode uint8
+
+const (
+	PPPoECodePADI    PPPoECode = 0x09
+	PPPoECodePADO    PPPoECode = 0x07
+	PPPoECodePADR    PPPoECode = 0x19
+	PPPoECodePADS    PPPoECode = 0x65
+	PPPoECodePADT    PPPoECode = 0xA7
+	PPPoECodeSession PPPoECode = 0x00
+)
+
+// PPPType is an enumeration of PPP type values, and acts as a decoder for any
+// type it supports.
+type PPPType uint16
+
+const (
+	PPPTypeIPv4          PPPType = 0x0021
+	PPPTypeIPv6          PPPType = 0x0057
+	PPPTypeMPLSUnicast   PPPType = 0x0281
+	PPPTypeMPLSMulticast PPPType = 0x0283
+)
+
+// SCTPChunkType is an enumeration of chunk types inside SCTP packets.
+type SCTPChunkType uint8
+
+const (
+	SCTPChunkTypeData             SCTPChunkType = 0
+	SCTPChunkTypeInit             SCTPChunkType = 1
+	SCTPChunkTypeInitAck          SCTPChunkType = 2
+	SCTPChunkTypeSack             SCTPChunkType = 3
+	SCTPChunkTypeHeartbeat        SCTPChunkType = 4
+	SCTPChunkTypeHeartbeatAck     SCTPChunkType = 5
+	SCTPChunkTypeAbort            SCTPChunkType = 6
+	SCTPChunkTypeShutdown         SCTPChunkType = 7
+	SCTPChunkTypeShutdownAck      SCTPChunkType = 8
+	SCTPChunkTypeError            SCTPChunkType = 9
+	SCTPChunkTypeCookieEcho       SCTPChunkType = 10
+	SCTPChunkTypeCookieAck        SCTPChunkType = 11
+	SCTPChunkTypeShutdownComplete SCTPChunkType = 14
+)
+
+// FDDIFrameControl is an enumeration of FDDI frame control bytes.
+type FDDIFrameControl uint8
+
+const (
+	FDDIFrameControlLLC FDDIFrameControl = 0x50
+)
+
+// EAPOLType is an enumeration of EAPOL packet types.
+type EAPOLType uint8
+
+const (
+	EAPOLTypeEAP      EAPOLType = 0
+	EAPOLTypeStart    EAPOLType = 1
+	EAPOLTypeLogOff   EAPOLType = 2
+	EAPOLTypeKey      EAPOLType = 3
+	EAPOLTypeASFAlert EAPOLType = 4
+)
+
+// ProtocolFamily is the set of values defined as PF_* in sys/socket.h
+type ProtocolFamily uint8
+
+const (
+	ProtocolFamilyIPv4 ProtocolFamily = 2
+	// BSDs use different values for INET6... glory be.  These values taken from
+	// tcpdump 4.3.0.
+	ProtocolFamilyIPv6BSD     ProtocolFamily = 24
+	ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28
+	ProtocolFamilyIPv6Darwin  ProtocolFamily = 30
+	ProtocolFamilyIPv6Linux   ProtocolFamily = 10
+)
+
+// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields.
+// By combining these two fields together into a single type, we're able to
+// provide a String function that correctly displays the subtype given the
+// top-level type.
+//
+// If you just care about the top-level type, use the MainType function.
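+//
+// For example, Dot11TypeMgmtProbeReq.MainType() == Dot11TypeMgmt, while
+// Dot11TypeDataQOSData.MainType() == Dot11TypeData.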
+type Dot11Type uint8
+
+// MainType strips the subtype information from the given type,
+// returning just the overarching type (Mgmt, Ctrl, Data, Reserved).
+func (d Dot11Type) MainType() Dot11Type {
+	return d & dot11TypeMask
+}
+
+func (d Dot11Type) QOS() bool {
+	return d&dot11QOSMask == Dot11TypeDataQOSData
+}
+
+const (
+	Dot11TypeMgmt     Dot11Type = 0x00
+	Dot11TypeCtrl     Dot11Type = 0x01
+	Dot11TypeData     Dot11Type = 0x02
+	Dot11TypeReserved Dot11Type = 0x03
+	dot11TypeMask               = 0x03
+	dot11QOSMask                = 0x23
+
+	// The following are type/subtype conglomerations.
+
+	// Management
+	Dot11TypeMgmtAssociationReq    Dot11Type = 0x00
+	Dot11TypeMgmtAssociationResp   Dot11Type = 0x04
+	Dot11TypeMgmtReassociationReq  Dot11Type = 0x08
+	Dot11TypeMgmtReassociationResp Dot11Type = 0x0c
+	Dot11TypeMgmtProbeReq          Dot11Type = 0x10
+	Dot11TypeMgmtProbeResp         Dot11Type = 0x14
+	Dot11TypeMgmtMeasurementPilot  Dot11Type = 0x18
+	Dot11TypeMgmtBeacon            Dot11Type = 0x20
+	Dot11TypeMgmtATIM              Dot11Type = 0x24
+	Dot11TypeMgmtDisassociation    Dot11Type = 0x28
+	Dot11TypeMgmtAuthentication    Dot11Type = 0x2c
+	Dot11TypeMgmtDeauthentication  Dot11Type = 0x30
+	Dot11TypeMgmtAction            Dot11Type = 0x34
+	Dot11TypeMgmtActionNoAck       Dot11Type = 0x38
+
+	// Control
+	Dot11TypeCtrlWrapper       Dot11Type = 0x1d
+	Dot11TypeCtrlBlockAckReq   Dot11Type = 0x21
+	Dot11TypeCtrlBlockAck      Dot11Type = 0x25
+	Dot11TypeCtrlPowersavePoll Dot11Type = 0x29
+	Dot11TypeCtrlRTS           Dot11Type = 0x2d
+	Dot11TypeCtrlCTS           Dot11Type = 0x31
+	Dot11TypeCtrlAck           Dot11Type = 0x35
+	Dot11TypeCtrlCFEnd         Dot11Type = 0x39
+	Dot11TypeCtrlCFEndAck      Dot11Type = 0x3d
+
+	// Data
+	Dot11TypeDataCFAck              Dot11Type = 0x06
+	Dot11TypeDataCFPoll             Dot11Type = 0x0a
+	Dot11TypeDataCFAckPoll          Dot11Type = 0x0e
+	Dot11TypeDataNull               Dot11Type = 0x12
+	Dot11TypeDataCFAckNoData        Dot11Type = 0x16
+	Dot11TypeDataCFPollNoData       Dot11Type = 0x1a
+	Dot11TypeDataCFAckPollNoData    Dot11Type = 0x1e
+	Dot11TypeDataQOSData            Dot11Type = 0x22
+	Dot11TypeDataQOSDataCFAck       Dot11Type = 0x26
+	Dot11TypeDataQOSDataCFPoll      Dot11Type = 0x2a
+	Dot11TypeDataQOSDataCFAckPoll   Dot11Type = 0x2e
+	Dot11TypeDataQOSNull            Dot11Type = 0x32
+	Dot11TypeDataQOSCFPollNoData    Dot11Type = 0x3a
+	Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e
+)
+
+// Decode a raw v4 or v6 IP packet.
+func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error {
+	version := data[0] >> 4
+	switch version {
+	case 4:
+		return decodeIPv4(data, p)
+	case 6:
+		return decodeIPv6(data, p)
+	}
+	return fmt.Errorf("Invalid IP packet version %v", version)
+}
+
+func initActualTypeData() {
+	// Each of the XXXTypeMetadata arrays contains mappings of how to handle enum
+	// values for various enum types in gopacket/layers.
+	// These arrays are actually created by gen2.go and stored in
+	// enums_generated.go.
+	//
+	// So, EthernetTypeMetadata[2] contains information on how to handle EthernetType
+	// 2, including which name to give it and which decoder to use to decode
+	// packet data of that type.  These arrays are filled by default with all of the
+	// protocols gopacket/layers knows how to handle, but users of the library can
+	// add new decoders or override existing ones.  For example, if you write a better
+	// TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith
+	// with your new decoder, and all gopacket/layers decoding will use your new
+	// decoder whenever they encounter that IPProtocol.
+
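+	// For example, a user of the library could install their own TCP decoder
+	// from their package's init function (an illustrative sketch only;
+	// decodeMyTCP stands for a hypothetical user-supplied decode function):
+	//
+	//	layers.IPProtocolMetadata[layers.IPProtocolTCP] = layers.EnumMetadata{
+	//		DecodeWith: gopacket.DecodeFunc(decodeMyTCP),
+	//		Name:       "TCP",
+	//		LayerType:  layers.LayerTypeTCP,
+	//	}
+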
+	// Here we link up all enumerations with their respective names and decoders.
+	EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC}
+	EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP}
+	EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+	EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP}
+	EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE}
+	EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE}
+	EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP}
+	EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery}
+	EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery}
+	EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery}
+	EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS}
+	EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS}
+	EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL}
+	EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+	EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet}
+
+	IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP}
+	IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: "UDP", LayerType: LayerTypeUDP}
+	IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4}
+	IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6}
+	IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP}
+	IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP}
+	IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP}
+	IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE}
+	IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop}
+	IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing}
+	IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment}
+	IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination}
+	IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF}
+	IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH}
+	IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP}
+	IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite}
+	IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS}
+	IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload}
+	IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP}
+	IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP}
+
+	SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"}
+
+	PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"}
+	PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"}
+	PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"}
+	PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"}
+
+	PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+
+	LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"}
+	LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+	LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"}
+	LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"}
+	LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "802.11"}
+	LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"}
+	LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	// See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85
+	// Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508
+	if runtime.GOOS == "openbsd" {
+		LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	} else {
+		LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	}
+	LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"}
+	LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"}
+	LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"}
+	LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux SLL"}
+	LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"}
+
+	FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"}
+
+	EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP}
+	EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey}
+
+	ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+
+	Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq}
+	Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp}
+	Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq}
+	Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp}
+	Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq}
+	Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp}
+	Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot}
+	Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon}
+	Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM}
+	Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation}
+	Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication}
+	Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: "MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication}
+	Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction}
+	Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck}
+	Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl}
+	Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl}
+	Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq}
+	Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck}
+	Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll}
+	Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS}
+	Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS}
+	Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck}
+	Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd}
+	Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck}
+	Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data}
+	Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck}
+	Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll}
+	Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll}
+	Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull}
+	Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData}
+	Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll}
+	Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull}
+	Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData}
+
+	USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt}
+	USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl}
+	USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk}
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/google/gopacket/layers/enums_generated.go
new file mode 100644
index 0000000..bf77aac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums_generated.go
@@ -0,0 +1,434 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033
+
+import (
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+func init() {
+	initUnknownTypesForLinkType()
+	initUnknownTypesForEthernetType()
+	initUnknownTypesForPPPType()
+	initUnknownTypesForIPProtocol()
+	initUnknownTypesForSCTPChunkType()
+	initUnknownTypesForPPPoECode()
+	initUnknownTypesForFDDIFrameControl()
+	initUnknownTypesForEAPOLType()
+	initUnknownTypesForProtocolFamily()
+	initUnknownTypesForDot11Type()
+	initUnknownTypesForUSBTransportType()
+	initActualTypeData()
+}
+
+// Decode calls LinkTypeMetadata.DecodeWith's decoder.
+func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return LinkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns LinkTypeMetadata.Name.
+func (a LinkType) String() string {
+	return LinkTypeMetadata[a].Name
+}
+
+// LayerType returns LinkTypeMetadata.LayerType.
+func (a LinkType) LayerType() gopacket.LayerType {
+	return LinkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForLinkType int
+
+func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForLinkType) Error() string {
+	return fmt.Sprintf("Unable to decode LinkType %d", int(*a))
+}
+
+var errorDecodersForLinkType [256]errorDecoderForLinkType
+var LinkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForLinkType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForLinkType[i] = errorDecoderForLinkType(i)
+		LinkTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForLinkType[i],
+			Name:       "UnknownLinkType",
+		}
+	}
+}
+
+// Decode calls EthernetTypeMetadata.DecodeWith's decoder.
+func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return EthernetTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EthernetTypeMetadata.Name.
+func (a EthernetType) String() string {
+	return EthernetTypeMetadata[a].Name
+}
+
+// LayerType returns EthernetTypeMetadata.LayerType.
+func (a EthernetType) LayerType() gopacket.LayerType {
+	return EthernetTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEthernetType int
+
+func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForEthernetType) Error() string {
+	return fmt.Sprintf("Unable to decode EthernetType %d", int(*a))
+}
+
+var errorDecodersForEthernetType [65536]errorDecoderForEthernetType
+var EthernetTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForEthernetType() {
+	for i := 0; i < 65536; i++ {
+		errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i)
+		EthernetTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForEthernetType[i],
+			Name:       "UnknownEthernetType",
+		}
+	}
+}
+
+// Decode calls PPPTypeMetadata.DecodeWith's decoder.
+func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return PPPTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPTypeMetadata.Name.
+func (a PPPType) String() string {
+	return PPPTypeMetadata[a].Name
+}
+
+// LayerType returns PPPTypeMetadata.LayerType.
+func (a PPPType) LayerType() gopacket.LayerType {
+	return PPPTypeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPType int
+
+func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForPPPType) Error() string {
+	return fmt.Sprintf("Unable to decode PPPType %d", int(*a))
+}
+
+var errorDecodersForPPPType [65536]errorDecoderForPPPType
+var PPPTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForPPPType() {
+	for i := 0; i < 65536; i++ {
+		errorDecodersForPPPType[i] = errorDecoderForPPPType(i)
+		PPPTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForPPPType[i],
+			Name:       "UnknownPPPType",
+		}
+	}
+}
+
+// Decode calls IPProtocolMetadata.DecodeWith's decoder.
+func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return IPProtocolMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns IPProtocolMetadata.Name.
+func (a IPProtocol) String() string {
+	return IPProtocolMetadata[a].Name
+}
+
+// LayerType returns IPProtocolMetadata.LayerType.
+func (a IPProtocol) LayerType() gopacket.LayerType {
+	return IPProtocolMetadata[a].LayerType
+}
+
+type errorDecoderForIPProtocol int
+
+func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForIPProtocol) Error() string {
+	return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a))
+}
+
+var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol
+var IPProtocolMetadata [256]EnumMetadata
+
+func initUnknownTypesForIPProtocol() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i)
+		IPProtocolMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForIPProtocol[i],
+			Name:       "UnknownIPProtocol",
+		}
+	}
+}
+
+// Decode calls SCTPChunkTypeMetadata.DecodeWith's decoder.
+func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns SCTPChunkTypeMetadata.Name.
+func (a SCTPChunkType) String() string {
+	return SCTPChunkTypeMetadata[a].Name
+}
+
+// LayerType returns SCTPChunkTypeMetadata.LayerType.
+func (a SCTPChunkType) LayerType() gopacket.LayerType {
+	return SCTPChunkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForSCTPChunkType int
+
+func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForSCTPChunkType) Error() string {
+	return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a))
+}
+
+var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType
+var SCTPChunkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForSCTPChunkType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i)
+		SCTPChunkTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForSCTPChunkType[i],
+			Name:       "UnknownSCTPChunkType",
+		}
+	}
+}
+
+// Decode calls PPPoECodeMetadata.DecodeWith's decoder.
+func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return PPPoECodeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPoECodeMetadata.Name.
+func (a PPPoECode) String() string {
+	return PPPoECodeMetadata[a].Name
+}
+
+// LayerType returns PPPoECodeMetadata.LayerType.
+func (a PPPoECode) LayerType() gopacket.LayerType {
+	return PPPoECodeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPoECode int
+
+func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForPPPoECode) Error() string {
+	return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a))
+}
+
+var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode
+var PPPoECodeMetadata [256]EnumMetadata
+
+func initUnknownTypesForPPPoECode() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i)
+		PPPoECodeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForPPPoECode[i],
+			Name:       "UnknownPPPoECode",
+		}
+	}
+}
+
+// Decode calls FDDIFrameControlMetadata.DecodeWith's decoder.
+func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns FDDIFrameControlMetadata.Name.
+func (a FDDIFrameControl) String() string {
+	return FDDIFrameControlMetadata[a].Name
+}
+
+// LayerType returns FDDIFrameControlMetadata.LayerType.
+func (a FDDIFrameControl) LayerType() gopacket.LayerType {
+	return FDDIFrameControlMetadata[a].LayerType
+}
+
+type errorDecoderForFDDIFrameControl int
+
+func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForFDDIFrameControl) Error() string {
+	return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a))
+}
+
+var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl
+var FDDIFrameControlMetadata [256]EnumMetadata
+
+func initUnknownTypesForFDDIFrameControl() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i)
+		FDDIFrameControlMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForFDDIFrameControl[i],
+			Name:       "UnknownFDDIFrameControl",
+		}
+	}
+}
+
+// Decode calls EAPOLTypeMetadata.DecodeWith's decoder.
+func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EAPOLTypeMetadata.Name.
+func (a EAPOLType) String() string {
+	return EAPOLTypeMetadata[a].Name
+}
+
+// LayerType returns EAPOLTypeMetadata.LayerType.
+func (a EAPOLType) LayerType() gopacket.LayerType {
+	return EAPOLTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEAPOLType int
+
+func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForEAPOLType) Error() string {
+	return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a))
+}
+
+var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType
+var EAPOLTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForEAPOLType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i)
+		EAPOLTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForEAPOLType[i],
+			Name:       "UnknownEAPOLType",
+		}
+	}
+}
+
+// Decode calls ProtocolFamilyMetadata.DecodeWith's decoder.
+func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns ProtocolFamilyMetadata.Name.
+func (a ProtocolFamily) String() string {
+	return ProtocolFamilyMetadata[a].Name
+}
+
+// LayerType returns ProtocolFamilyMetadata.LayerType.
+func (a ProtocolFamily) LayerType() gopacket.LayerType {
+	return ProtocolFamilyMetadata[a].LayerType
+}
+
+type errorDecoderForProtocolFamily int
+
+func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForProtocolFamily) Error() string {
+	return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a))
+}
+
+var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily
+var ProtocolFamilyMetadata [256]EnumMetadata
+
+func initUnknownTypesForProtocolFamily() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i)
+		ProtocolFamilyMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForProtocolFamily[i],
+			Name:       "UnknownProtocolFamily",
+		}
+	}
+}
+
+// Decode calls Dot11TypeMetadata.DecodeWith's decoder.
+func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return Dot11TypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns Dot11TypeMetadata.Name.
+func (a Dot11Type) String() string {
+	return Dot11TypeMetadata[a].Name
+}
+
+// LayerType returns Dot11TypeMetadata.LayerType.
+func (a Dot11Type) LayerType() gopacket.LayerType {
+	return Dot11TypeMetadata[a].LayerType
+}
+
+type errorDecoderForDot11Type int
+
+func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForDot11Type) Error() string {
+	return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a))
+}
+
+var errorDecodersForDot11Type [256]errorDecoderForDot11Type
+var Dot11TypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForDot11Type() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i)
+		Dot11TypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForDot11Type[i],
+			Name:       "UnknownDot11Type",
+		}
+	}
+}
+
+// Decode calls USBTransportTypeMetadata.DecodeWith's decoder.
+func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns USBTransportTypeMetadata.Name.
+func (a USBTransportType) String() string {
+	return USBTransportTypeMetadata[a].Name
+}
+
+// LayerType returns USBTransportTypeMetadata.LayerType.
+func (a USBTransportType) LayerType() gopacket.LayerType {
+	return USBTransportTypeMetadata[a].LayerType
+}
+
+type errorDecoderForUSBTransportType int
+
+func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForUSBTransportType) Error() string {
+	return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a))
+}
+
+var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType
+var USBTransportTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForUSBTransportType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i)
+		USBTransportTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForUSBTransportType[i],
+			Name:       "UnknownUSBTransportType",
+		}
+	}
+}
diff --git a/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/google/gopacket/layers/etherip.go
new file mode 100644
index 0000000..5b7b722
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/etherip.go
@@ -0,0 +1,45 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// EtherIP is the struct for storing RFC 3378 EtherIP packet headers.
+type EtherIP struct {
+	BaseLayer
+	Version  uint8
+	Reserved uint16
+}
+
+// LayerType returns gopacket.LayerTypeEtherIP.
+func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	e.Version = data[0] >> 4
+	e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff
+	e.BaseLayer = BaseLayer{data[:2], data[2:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EtherIP) CanDecode() gopacket.LayerClass {
+	return LayerTypeEtherIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EtherIP) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error {
+	e := &EtherIP{}
+	return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/google/gopacket/layers/ethernet.go
new file mode 100644
index 0000000..b73748f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ethernet.go
@@ -0,0 +1,123 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/google/gopacket"
+	"net"
+)
+
+// EthernetBroadcast is the broadcast MAC address used by Ethernet.
+var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// Ethernet is the layer for Ethernet frame headers.
+type Ethernet struct {
+	BaseLayer
+	SrcMAC, DstMAC net.HardwareAddr
+	EthernetType   EthernetType
+	// Length is only set if a length field exists within this header.  Ethernet
+	// headers follow two different standards, one that uses an EthernetType, the
+	// other of which defines a length that is followed by an LLC header (802.3).  If the
+	// former is the case, we set EthernetType and Length stays 0.  In the latter
+	// case, we set Length and EthernetType = EthernetTypeLLC.
+	Length uint16
+}
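+
+// Note (informal summary, not new behavior): a type/length field value below
+// 0x0600 is treated as a length (802.3 framing, payload beginning with an LLC
+// header), while 0x0600 and above is an EthernetType (Ethernet II framing);
+// DecodeFromBytes below applies exactly this rule.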
+
+// LayerType returns LayerTypeEthernet
+func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (e *Ethernet) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 14 {
+		return errors.New("Ethernet packet too small")
+	}
+	eth.DstMAC = net.HardwareAddr(data[0:6])
+	eth.SrcMAC = net.HardwareAddr(data[6:12])
+	eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14]))
+	eth.BaseLayer = BaseLayer{data[:14], data[14:]}
+	eth.Length = 0
+	if eth.EthernetType < 0x0600 {
+		eth.Length = uint16(eth.EthernetType)
+		eth.EthernetType = EthernetTypeLLC
+		if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 {
+			df.SetTruncated()
+		} else if cmp > 0 {
+			// Strip off bytes at the end, since we have too many bytes
+			eth.Payload = eth.Payload[:len(eth.Payload)-cmp]
+		}
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if len(eth.DstMAC) != 6 {
+		return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC)
+	}
+	if len(eth.SrcMAC) != 6 {
+		return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC)
+	}
+	payload := b.Bytes()
+	bytes, err := b.PrependBytes(14)
+	if err != nil {
+		return err
+	}
+	copy(bytes, eth.DstMAC)
+	copy(bytes[6:], eth.SrcMAC)
+	if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC {
+		if opts.FixLengths {
+			eth.Length = uint16(len(payload))
+		}
+		if eth.EthernetType != EthernetTypeLLC {
+			return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length)
+		} else if eth.Length > 0x0600 {
+			return fmt.Errorf("invalid ethernet length %v", eth.Length)
+		}
+		binary.BigEndian.PutUint16(bytes[12:], eth.Length)
+	} else {
+		binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType))
+	}
+	length := len(b.Bytes())
+	if length < 60 {
+		// Pad out to 60 bytes.
+		padding, err := b.AppendBytes(60 - length)
+		if err != nil {
+			return err
+		}
+		copy(padding, lotsOfZeros[:])
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (eth *Ethernet) CanDecode() gopacket.LayerClass {
+	return LayerTypeEthernet
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (eth *Ethernet) NextLayerType() gopacket.LayerType {
+	return eth.EthernetType.LayerType()
+}
+
+func decodeEthernet(data []byte, p gopacket.PacketBuilder) error {
+	eth := &Ethernet{}
+	err := eth.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(eth)
+	p.SetLinkLayer(eth)
+	return p.NextDecoder(eth.EthernetType)
+}
diff --git a/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/google/gopacket/layers/fddi.go
new file mode 100644
index 0000000..ed9e195
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/fddi.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+	"net"
+)
+
+// FDDI contains the header for FDDI frames.
+type FDDI struct {
+	BaseLayer
+	FrameControl   FDDIFrameControl
+	Priority       uint8
+	SrcMAC, DstMAC net.HardwareAddr
+}
+
+// LayerType returns LayerTypeFDDI.
+func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (f *FDDI) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC)
+}
+
+func decodeFDDI(data []byte, p gopacket.PacketBuilder) error {
+	f := &FDDI{
+		FrameControl: FDDIFrameControl(data[0] & 0xF8),
+		Priority:     data[0] & 0x07,
+		SrcMAC:       net.HardwareAddr(data[1:7]),
+		DstMAC:       net.HardwareAddr(data[7:13]),
+		BaseLayer:    BaseLayer{data[:13], data[13:]},
+	}
+	p.SetLinkLayer(f)
+	p.AddLayer(f)
+	return p.NextDecoder(f.FrameControl)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/google/gopacket/layers/gen_linted.sh
new file mode 100644
index 0000000..75c701f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen_linted.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+for i in *.go; do golint $i | grep -q . || echo $i; done > .linted
diff --git a/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/google/gopacket/layers/geneve.go
new file mode 100644
index 0000000..72fe7c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/geneve.go
@@ -0,0 +1,110 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// Geneve is specified here https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03
+// Geneve Header:
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Ver|  Opt Len  |O|C|    Rsvd.  |          Protocol Type        |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |        Virtual Network Identifier (VNI)       |    Reserved   |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                    Variable Length Options                    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type Geneve struct {
+	BaseLayer
+	Version        uint8        // 2 bits
+	OptionsLength  uint8        // 6 bits
+	OAMPacket      bool         // 1 bit
+	CriticalOption bool         // 1 bit
+	Protocol       EthernetType // 16 bits
+	VNI            uint32       // 24 bits
+	Options        []*GeneveOption
+}
+
+// Geneve Tunnel Options
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |          Option Class         |      Type     |R|R|R| Length  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                      Variable Option Data                     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type GeneveOption struct {
+	Class  uint16 // 16 bits
+	Type   uint8  // 8 bits
+	Flags  uint8  // 3 bits
+	Length uint8  // 5 bits
+	Data   []byte
+}
+
+// LayerType returns LayerTypeGeneve
+func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve }
+
+func decodeGeneveOption(data []byte, gn *Geneve) (*GeneveOption, uint8) {
+	opt := &GeneveOption{}
+
+	opt.Class = binary.BigEndian.Uint16(data[0:2])
+	opt.Type = data[2]
+	opt.Flags = data[3] >> 4
+	opt.Length = (data[3]&0xf)*4 + 4
+
+	opt.Data = make([]byte, opt.Length-4)
+	copy(opt.Data, data[4:opt.Length])
+
+	return opt, opt.Length
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 7 {
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	gn.Version = data[0] >> 7
+	gn.OptionsLength = (data[0] & 0x3f) * 4
+
+	gn.OAMPacket = data[1]&0x80 > 0
+	gn.CriticalOption = data[1]&0x40 > 0
+	gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+
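+	// The 24-bit VNI occupies bytes 4-6 of the header; copying it into the low
+	// three bytes of a zeroed 4-byte buffer lets BigEndian.Uint32 yield the
+	// plain value.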
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+	gn.VNI = binary.BigEndian.Uint32(buf[:])
+
+	offset, length := uint8(8), int32(gn.OptionsLength)
+	if len(data) < int(length+7) {
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	for length > 0 {
+		opt, len := decodeGeneveOption(data[offset:], gn)
+		gn.Options = append(gn.Options, opt)
+
+		length -= int32(len)
+		offset += len
+	}
+
+	gn.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (gn *Geneve) NextLayerType() gopacket.LayerType {
+	return gn.Protocol.LayerType()
+}
+
+func decodeGeneve(data []byte, p gopacket.PacketBuilder) error {
+	gn := &Geneve{}
+	return decodingLayerDecoder(gn, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gre.go b/vendor/github.com/google/gopacket/layers/gre.go
new file mode 100644
index 0000000..9c5e7d2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gre.go
@@ -0,0 +1,200 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+
+	"github.com/google/gopacket"
+)
+
+// GRE is a Generic Routing Encapsulation header.
+type GRE struct {
+	BaseLayer
+	ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool
+	RecursionControl, Flags, Version                                                       uint8
+	Protocol                                                                               EthernetType
+	Checksum, Offset                                                                       uint16
+	Key, Seq, Ack                                                                          uint32
+	*GRERouting
+}
+
+// GRERouting is GRE routing information, present if the RoutingPresent flag is
+// set.
+type GRERouting struct {
+	AddressFamily        uint16
+	SREOffset, SRELength uint8
+	RoutingInformation   []byte
+	Next                 *GRERouting
+}
+
+// LayerType returns gopacket.LayerTypeGRE.
+func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	g.ChecksumPresent = data[0]&0x80 != 0
+	g.RoutingPresent = data[0]&0x40 != 0
+	g.KeyPresent = data[0]&0x20 != 0
+	g.SeqPresent = data[0]&0x10 != 0
+	g.StrictSourceRoute = data[0]&0x08 != 0
+	g.AckPresent = data[1]&0x80 != 0
+	g.RecursionControl = data[0] & 0x7
+	g.Flags = data[1] >> 3
+	g.Version = data[1] & 0x7
+	g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	offset := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2])
+		g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		offset += 4
+	}
+	if g.KeyPresent {
+		g.Key = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	if g.SeqPresent {
+		g.Seq = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	if g.RoutingPresent {
+		tail := &g.GRERouting
+		for {
+			sre := &GRERouting{
+				AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]),
+				SREOffset:     data[offset+2],
+				SRELength:     data[offset+3],
+			}
+			sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)]
+			offset += 4 + int(sre.SRELength)
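+			// An SRE with AddressFamily 0 and SRELength 0 is the terminating
+			// "NULL" SRE (RFC 1701) and is not linked into the routing list.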
+			if sre.AddressFamily == 0 && sre.SRELength == 0 {
+				break
+			}
+			(*tail) = sre
+			tail = &sre.Next
+		}
+	}
+	if g.AckPresent {
+		g.Ack = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	g.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializationBuffer,
+// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info.
+func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	size := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		size += 4
+	}
+	if g.KeyPresent {
+		size += 4
+	}
+	if g.SeqPresent {
+		size += 4
+	}
+	if g.RoutingPresent {
+		r := g.GRERouting
+		for r != nil {
+			size += 4 + int(r.SRELength)
+			r = r.Next
+		}
+		size += 4
+	}
+	if g.AckPresent {
+		size += 4
+	}
+	buf, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	// Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags.
+	buf[0] = 0
+	buf[1] = 0
+	if g.ChecksumPresent {
+		buf[0] |= 0x80
+	}
+	if g.RoutingPresent {
+		buf[0] |= 0x40
+	}
+	if g.KeyPresent {
+		buf[0] |= 0x20
+	}
+	if g.SeqPresent {
+		buf[0] |= 0x10
+	}
+	if g.StrictSourceRoute {
+		buf[0] |= 0x08
+	}
+	if g.AckPresent {
+		buf[1] |= 0x80
+	}
+	buf[0] |= g.RecursionControl
+	buf[1] |= g.Flags << 3
+	buf[1] |= g.Version
+	binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol))
+	offset := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		// Don't write the checksum value yet, as we may need to compute it,
+		// which requires the entire header be complete.
+		// Instead we zeroize the memory in case it is dirty.
+		buf[offset] = 0
+		buf[offset+1] = 0
+		binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset)
+		offset += 4
+	}
+	if g.KeyPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key)
+		offset += 4
+	}
+	if g.SeqPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq)
+		offset += 4
+	}
+	if g.RoutingPresent {
+		sre := g.GRERouting
+		for sre != nil {
+			binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily)
+			buf[offset+2] = sre.SREOffset
+			buf[offset+3] = sre.SRELength
+			copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation)
+			offset += 4 + int(sre.SRELength)
+			sre = sre.Next
+		}
+		// Terminate routing field with a "NULL" SRE.
+		binary.BigEndian.PutUint32(buf[offset:offset+4], 0)
+	}
+	if g.AckPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack)
+		offset += 4
+	}
+	if g.ChecksumPresent {
+		if opts.ComputeChecksums {
+			g.Checksum = tcpipChecksum(b.Bytes(), 0)
+		}
+
+		binary.BigEndian.PutUint16(buf[4:6], g.Checksum)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (g *GRE) CanDecode() gopacket.LayerClass {
+	return LayerTypeGRE
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (g *GRE) NextLayerType() gopacket.LayerType {
+	return g.Protocol.LayerType()
+}
+
+func decodeGRE(data []byte, p gopacket.PacketBuilder) error {
+	g := &GRE{}
+	return decodingLayerDecoder(g, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/google/gopacket/layers/gtp.go
new file mode 100644
index 0000000..0ec8a6a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gtp.go
@@ -0,0 +1,181 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+const gtpMinimumSizeInBytes int = 8
+
+// GTPExtensionHeader is used to carry extra data and enable future extensions of GTP without the need to use another version number.
+type GTPExtensionHeader struct {
+	Type    uint8
+	Content []byte
+}
+
+// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces.
+// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595
+type GTPv1U struct {
+	BaseLayer
+	Version             uint8
+	ProtocolType        uint8
+	Reserved            uint8
+	ExtensionHeaderFlag bool
+	SequenceNumberFlag  bool
+	NPDUFlag            bool
+	MessageType         uint8
+	MessageLength       uint16
+	TEID                uint32
+	SequenceNumber      uint16
+	NPDU                uint8
+	GTPExtensionHeaders []GTPExtensionHeader
+}
+
+// LayerType returns LayerTypeGTPv1U
+func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U }
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet
+func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	hLen := gtpMinimumSizeInBytes
+	dLen := len(data)
+	if dLen < hLen {
+		return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+	}
+	g.Version = (data[0] >> 5) & 0x07
+	g.ProtocolType = (data[0] >> 4) & 0x01
+	g.Reserved = (data[0] >> 3) & 0x01
+	g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1
+	g.NPDUFlag = (data[0] & 0x01) == 1
+	g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1
+	g.MessageType = data[1]
+	g.MessageLength = binary.BigEndian.Uint16(data[2:4])
+	pLen := 8 + g.MessageLength
+	if uint16(dLen) < pLen {
+		return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+	}
+	//  Field used to multiplex different connections in the same GTP tunnel.
+	g.TEID = binary.BigEndian.Uint32(data[4:8])
+	cIndex := uint16(hLen)
+	if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag {
+		hLen += 4
+		cIndex += 4
+		if dLen < hLen {
+			return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+		}
+		if g.SequenceNumberFlag {
+			g.SequenceNumber = binary.BigEndian.Uint16(data[8:10])
+		}
+		if g.NPDUFlag {
+			g.NPDU = data[10]
+		}
+		if g.ExtensionHeaderFlag {
+			extensionFlag := true
+			for extensionFlag {
+				extensionType := uint8(data[cIndex-1])
+				extensionLength := uint(data[cIndex])
+				if extensionLength == 0 {
+					return fmt.Errorf("GTP packet with invalid extension header")
+				}
+				// extensionLength is in 4-octet units
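+				// (for example, a length field of 1 covers 4 octets in total:
+				// the length byte, two content bytes, and the trailing
+				// next-extension-header type byte)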
+				lIndex := cIndex + (uint16(extensionLength) * 4)
+				if uint16(dLen) < lIndex {
+					return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen)
+				}
+				content := data[cIndex+1 : lIndex-1]
+				eh := GTPExtensionHeader{Type: extensionType, Content: content}
+				g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh)
+				cIndex = lIndex
+				// Check if coming bytes are from an extension header
+				extensionFlag = data[cIndex-1] != 0
+
+			}
+		}
+	}
+	g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]}
+	return nil
+
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(gtpMinimumSizeInBytes)
+	if err != nil {
+		return err
+	}
+	data[0] |= (g.Version << 5)
+	data[0] |= (1 << 4)
+	if len(g.GTPExtensionHeaders) > 0 {
+		data[0] |= 0x04
+		g.ExtensionHeaderFlag = true
+	}
+	if g.SequenceNumberFlag {
+		data[0] |= 0x02
+	}
+	if g.NPDUFlag {
+		data[0] |= 0x01
+	}
+	data[1] = g.MessageType
+	binary.BigEndian.PutUint16(data[2:4], g.MessageLength)
+	binary.BigEndian.PutUint32(data[4:8], g.TEID)
+	if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag {
+		data, err := b.AppendBytes(4)
+		if err != nil {
+			return err
+		}
+		binary.BigEndian.PutUint16(data[:2], g.SequenceNumber)
+		data[2] = g.NPDU
+		for _, eh := range g.GTPExtensionHeaders {
+			data[len(data)-1] = eh.Type
+			lContent := len(eh.Content)
+			// extensionLength is in 4-octet units
+			extensionLength := (lContent + 2) / 4
+			// Get two extra byte for the next extension header type and length
+			data, err = b.AppendBytes(lContent + 2)
+			if err != nil {
+				return err
+			}
+			data[0] = byte(extensionLength)
+			copy(data[1:lContent+1], eh.Content)
+		}
+	}
+	return nil
+
+}
+
+// CanDecode returns a set of layers that GTP objects can decode.
+func (g *GTPv1U) CanDecode() gopacket.LayerClass {
+	return LayerTypeGTPv1U
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// decode after this one.
+func (g *GTPv1U) NextLayerType() gopacket.LayerType {
+	// Guard against header-only packets: DecodeFromBytes accepts an empty
+	// payload, and indexing it below would otherwise panic.
+	if len(g.LayerPayload()) == 0 {
+		return gopacket.LayerTypeZero
+	}
+	version := uint8(g.LayerPayload()[0]) >> 4
+	if version == 4 {
+		return LayerTypeIPv4
+	} else if version == 6 {
+		return LayerTypeIPv6
+	} else {
+		return LayerTypePPP
+	}
+}
+
+func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error {
+	gtp := &GTPv1U{}
+	err := gtp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(gtp)
+	return p.NextDecoder(gtp.NextLayerType())
+}
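As a rough usage sketch (not part of the vendored file), the GTPv1U decoder above could be driven directly on a hand-built GTP-U packet along these lines, assuming the vendored import paths github.com/google/gopacket and github.com/google/gopacket/layers:

package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 8-byte GTP-U header: version 1, PT=1, no optional fields,
	// message type 0xff (G-PDU), message length 4, TEID 0x11223344,
	// followed by a 4-byte payload.
	raw := []byte{
		0x30, 0xff, 0x00, 0x04,
		0x11, 0x22, 0x33, 0x44,
		0xde, 0xad, 0xbe, 0xef,
	}
	var gtp layers.GTPv1U
	if err := gtp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		log.Fatal(err)
	}
	// Prints: version=1 teid=0x11223344 type=255 len=4 payload=4
	fmt.Printf("version=%d teid=%#x type=%d len=%d payload=%d\n",
		gtp.Version, gtp.TEID, gtp.MessageType, gtp.MessageLength, len(gtp.Payload))
}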
diff --git a/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/google/gopacket/layers/iana_ports.go
new file mode 100644
index 0000000..ddcf3ec
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/iana_ports.go
@@ -0,0 +1,11351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290
+// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml"
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
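As a small aside (not part of the generated file), these exported maps are keyed by the typed port numbers, so a service-name lookup might look like this sketch, again assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	// Resolve IANA service names for well-known ports.
	fmt.Println(layers.TCPPortNames[layers.TCPPort(443)]) // https
	fmt.Println(layers.UDPPortNames[layers.UDPPort(123)]) // ntp
}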
+var tcpPortNames = map[TCPPort]string{
+	1:     "tcpmux",
+	2:     "compressnet",
+	3:     "compressnet",
+	5:     "rje",
+	7:     "echo",
+	9:     "discard",
+	11:    "systat",
+	13:    "daytime",
+	17:    "qotd",
+	18:    "msp",
+	19:    "chargen",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	23:    "telnet",
+	25:    "smtp",
+	27:    "nsw-fe",
+	29:    "msg-icp",
+	31:    "msg-auth",
+	33:    "dsp",
+	37:    "time",
+	38:    "rap",
+	39:    "rlp",
+	41:    "graphics",
+	42:    "name",
+	43:    "nicname",
+	44:    "mpm-flags",
+	45:    "mpm",
+	46:    "mpm-snd",
+	48:    "auditd",
+	49:    "tacacs",
+	50:    "re-mail-ck",
+	52:    "xns-time",
+	53:    "domain",
+	54:    "xns-ch",
+	55:    "isi-gl",
+	56:    "xns-auth",
+	58:    "xns-mail",
+	62:    "acas",
+	63:    "whoispp",
+	64:    "covia",
+	65:    "tacacs-ds",
+	66:    "sql-net",
+	67:    "bootps",
+	68:    "bootpc",
+	69:    "tftp",
+	70:    "gopher",
+	71:    "netrjs-1",
+	72:    "netrjs-2",
+	73:    "netrjs-3",
+	74:    "netrjs-4",
+	76:    "deos",
+	78:    "vettcp",
+	79:    "finger",
+	80:    "http",
+	82:    "xfer",
+	83:    "mit-ml-dev",
+	84:    "ctf",
+	85:    "mit-ml-dev",
+	86:    "mfcobol",
+	88:    "kerberos",
+	89:    "su-mit-tg",
+	90:    "dnsix",
+	91:    "mit-dov",
+	92:    "npp",
+	93:    "dcp",
+	94:    "objcall",
+	95:    "supdup",
+	96:    "dixie",
+	97:    "swift-rvf",
+	98:    "tacnews",
+	99:    "metagram",
+	101:   "hostname",
+	102:   "iso-tsap",
+	103:   "gppitnp",
+	104:   "acr-nema",
+	105:   "cso",
+	106:   "3com-tsmux",
+	107:   "rtelnet",
+	108:   "snagas",
+	109:   "pop2",
+	110:   "pop3",
+	111:   "sunrpc",
+	112:   "mcidas",
+	113:   "ident",
+	115:   "sftp",
+	116:   "ansanotify",
+	117:   "uucp-path",
+	118:   "sqlserv",
+	119:   "nntp",
+	120:   "cfdptkt",
+	121:   "erpc",
+	122:   "smakynet",
+	123:   "ntp",
+	124:   "ansatrader",
+	125:   "locus-map",
+	126:   "nxedit",
+	127:   "locus-con",
+	128:   "gss-xlicen",
+	129:   "pwdgen",
+	130:   "cisco-fna",
+	131:   "cisco-tna",
+	132:   "cisco-sys",
+	133:   "statsrv",
+	134:   "ingres-net",
+	135:   "epmap",
+	136:   "profile",
+	137:   "netbios-ns",
+	138:   "netbios-dgm",
+	139:   "netbios-ssn",
+	140:   "emfis-data",
+	141:   "emfis-cntl",
+	142:   "bl-idm",
+	143:   "imap",
+	144:   "uma",
+	145:   "uaac",
+	146:   "iso-tp0",
+	147:   "iso-ip",
+	148:   "jargon",
+	149:   "aed-512",
+	150:   "sql-net",
+	151:   "hems",
+	152:   "bftp",
+	153:   "sgmp",
+	154:   "netsc-prod",
+	155:   "netsc-dev",
+	156:   "sqlsrv",
+	157:   "knet-cmp",
+	158:   "pcmail-srv",
+	159:   "nss-routing",
+	160:   "sgmp-traps",
+	161:   "snmp",
+	162:   "snmptrap",
+	163:   "cmip-man",
+	164:   "cmip-agent",
+	165:   "xns-courier",
+	166:   "s-net",
+	167:   "namp",
+	168:   "rsvd",
+	169:   "send",
+	170:   "print-srv",
+	171:   "multiplex",
+	172:   "cl-1",
+	173:   "xyplex-mux",
+	174:   "mailq",
+	175:   "vmnet",
+	176:   "genrad-mux",
+	177:   "xdmcp",
+	178:   "nextstep",
+	179:   "bgp",
+	180:   "ris",
+	181:   "unify",
+	182:   "audit",
+	183:   "ocbinder",
+	184:   "ocserver",
+	185:   "remote-kis",
+	186:   "kis",
+	187:   "aci",
+	188:   "mumps",
+	189:   "qft",
+	190:   "gacp",
+	191:   "prospero",
+	192:   "osu-nms",
+	193:   "srmp",
+	194:   "irc",
+	195:   "dn6-nlm-aud",
+	196:   "dn6-smm-red",
+	197:   "dls",
+	198:   "dls-mon",
+	199:   "smux",
+	200:   "src",
+	201:   "at-rtmp",
+	202:   "at-nbp",
+	203:   "at-3",
+	204:   "at-echo",
+	205:   "at-5",
+	206:   "at-zis",
+	207:   "at-7",
+	208:   "at-8",
+	209:   "qmtp",
+	210:   "z39-50",
+	211:   "914c-g",
+	212:   "anet",
+	213:   "ipx",
+	214:   "vmpwscs",
+	215:   "softpc",
+	216:   "CAIlic",
+	217:   "dbase",
+	218:   "mpp",
+	219:   "uarps",
+	220:   "imap3",
+	221:   "fln-spx",
+	222:   "rsh-spx",
+	223:   "cdc",
+	224:   "masqdialer",
+	242:   "direct",
+	243:   "sur-meas",
+	244:   "inbusiness",
+	245:   "link",
+	246:   "dsp3270",
+	247:   "subntbcst-tftp",
+	248:   "bhfhs",
+	256:   "rap",
+	257:   "set",
+	259:   "esro-gen",
+	260:   "openport",
+	261:   "nsiiops",
+	262:   "arcisdms",
+	263:   "hdap",
+	264:   "bgmp",
+	265:   "x-bone-ctl",
+	266:   "sst",
+	267:   "td-service",
+	268:   "td-replica",
+	269:   "manet",
+	271:   "pt-tls",
+	280:   "http-mgmt",
+	281:   "personal-link",
+	282:   "cableport-ax",
+	283:   "rescap",
+	284:   "corerjd",
+	286:   "fxp",
+	287:   "k-block",
+	308:   "novastorbakcup",
+	309:   "entrusttime",
+	310:   "bhmds",
+	311:   "asip-webadmin",
+	312:   "vslmp",
+	313:   "magenta-logic",
+	314:   "opalis-robot",
+	315:   "dpsi",
+	316:   "decauth",
+	317:   "zannet",
+	318:   "pkix-timestamp",
+	319:   "ptp-event",
+	320:   "ptp-general",
+	321:   "pip",
+	322:   "rtsps",
+	323:   "rpki-rtr",
+	324:   "rpki-rtr-tls",
+	333:   "texar",
+	344:   "pdap",
+	345:   "pawserv",
+	346:   "zserv",
+	347:   "fatserv",
+	348:   "csi-sgwp",
+	349:   "mftp",
+	350:   "matip-type-a",
+	351:   "matip-type-b",
+	352:   "dtag-ste-sb",
+	353:   "ndsauth",
+	354:   "bh611",
+	355:   "datex-asn",
+	356:   "cloanto-net-1",
+	357:   "bhevent",
+	358:   "shrinkwrap",
+	359:   "nsrmp",
+	360:   "scoi2odialog",
+	361:   "semantix",
+	362:   "srssend",
+	363:   "rsvp-tunnel",
+	364:   "aurora-cmgr",
+	365:   "dtk",
+	366:   "odmr",
+	367:   "mortgageware",
+	368:   "qbikgdp",
+	369:   "rpc2portmap",
+	370:   "codaauth2",
+	371:   "clearcase",
+	372:   "ulistproc",
+	373:   "legent-1",
+	374:   "legent-2",
+	375:   "hassle",
+	376:   "nip",
+	377:   "tnETOS",
+	378:   "dsETOS",
+	379:   "is99c",
+	380:   "is99s",
+	381:   "hp-collector",
+	382:   "hp-managed-node",
+	383:   "hp-alarm-mgr",
+	384:   "arns",
+	385:   "ibm-app",
+	386:   "asa",
+	387:   "aurp",
+	388:   "unidata-ldm",
+	389:   "ldap",
+	390:   "uis",
+	391:   "synotics-relay",
+	392:   "synotics-broker",
+	393:   "meta5",
+	394:   "embl-ndt",
+	395:   "netcp",
+	396:   "netware-ip",
+	397:   "mptn",
+	398:   "kryptolan",
+	399:   "iso-tsap-c2",
+	400:   "osb-sd",
+	401:   "ups",
+	402:   "genie",
+	403:   "decap",
+	404:   "nced",
+	405:   "ncld",
+	406:   "imsp",
+	407:   "timbuktu",
+	408:   "prm-sm",
+	409:   "prm-nm",
+	410:   "decladebug",
+	411:   "rmt",
+	412:   "synoptics-trap",
+	413:   "smsp",
+	414:   "infoseek",
+	415:   "bnet",
+	416:   "silverplatter",
+	417:   "onmux",
+	418:   "hyper-g",
+	419:   "ariel1",
+	420:   "smpte",
+	421:   "ariel2",
+	422:   "ariel3",
+	423:   "opc-job-start",
+	424:   "opc-job-track",
+	425:   "icad-el",
+	426:   "smartsdp",
+	427:   "svrloc",
+	428:   "ocs-cmu",
+	429:   "ocs-amu",
+	430:   "utmpsd",
+	431:   "utmpcd",
+	432:   "iasd",
+	433:   "nnsp",
+	434:   "mobileip-agent",
+	435:   "mobilip-mn",
+	436:   "dna-cml",
+	437:   "comscm",
+	438:   "dsfgw",
+	439:   "dasp",
+	440:   "sgcp",
+	441:   "decvms-sysmgt",
+	442:   "cvc-hostd",
+	443:   "https",
+	444:   "snpp",
+	445:   "microsoft-ds",
+	446:   "ddm-rdb",
+	447:   "ddm-dfm",
+	448:   "ddm-ssl",
+	449:   "as-servermap",
+	450:   "tserver",
+	451:   "sfs-smp-net",
+	452:   "sfs-config",
+	453:   "creativeserver",
+	454:   "contentserver",
+	455:   "creativepartnr",
+	456:   "macon-tcp",
+	457:   "scohelp",
+	458:   "appleqtc",
+	459:   "ampr-rcmd",
+	460:   "skronk",
+	461:   "datasurfsrv",
+	462:   "datasurfsrvsec",
+	463:   "alpes",
+	464:   "kpasswd",
+	465:   "urd",
+	466:   "digital-vrc",
+	467:   "mylex-mapd",
+	468:   "photuris",
+	469:   "rcp",
+	470:   "scx-proxy",
+	471:   "mondex",
+	472:   "ljk-login",
+	473:   "hybrid-pop",
+	474:   "tn-tl-w1",
+	475:   "tcpnethaspsrv",
+	476:   "tn-tl-fd1",
+	477:   "ss7ns",
+	478:   "spsc",
+	479:   "iafserver",
+	480:   "iafdbase",
+	481:   "ph",
+	482:   "bgs-nsi",
+	483:   "ulpnet",
+	484:   "integra-sme",
+	485:   "powerburst",
+	486:   "avian",
+	487:   "saft",
+	488:   "gss-http",
+	489:   "nest-protocol",
+	490:   "micom-pfs",
+	491:   "go-login",
+	492:   "ticf-1",
+	493:   "ticf-2",
+	494:   "pov-ray",
+	495:   "intecourier",
+	496:   "pim-rp-disc",
+	497:   "retrospect",
+	498:   "siam",
+	499:   "iso-ill",
+	500:   "isakmp",
+	501:   "stmf",
+	502:   "mbap",
+	503:   "intrinsa",
+	504:   "citadel",
+	505:   "mailbox-lm",
+	506:   "ohimsrv",
+	507:   "crs",
+	508:   "xvttp",
+	509:   "snare",
+	510:   "fcp",
+	511:   "passgo",
+	512:   "exec",
+	513:   "login",
+	514:   "shell",
+	515:   "printer",
+	516:   "videotex",
+	517:   "talk",
+	518:   "ntalk",
+	519:   "utime",
+	520:   "efs",
+	521:   "ripng",
+	522:   "ulp",
+	523:   "ibm-db2",
+	524:   "ncp",
+	525:   "timed",
+	526:   "tempo",
+	527:   "stx",
+	528:   "custix",
+	529:   "irc-serv",
+	530:   "courier",
+	531:   "conference",
+	532:   "netnews",
+	533:   "netwall",
+	534:   "windream",
+	535:   "iiop",
+	536:   "opalis-rdv",
+	537:   "nmsp",
+	538:   "gdomap",
+	539:   "apertus-ldp",
+	540:   "uucp",
+	541:   "uucp-rlogin",
+	542:   "commerce",
+	543:   "klogin",
+	544:   "kshell",
+	545:   "appleqtcsrvr",
+	546:   "dhcpv6-client",
+	547:   "dhcpv6-server",
+	548:   "afpovertcp",
+	549:   "idfp",
+	550:   "new-rwho",
+	551:   "cybercash",
+	552:   "devshr-nts",
+	553:   "pirp",
+	554:   "rtsp",
+	555:   "dsf",
+	556:   "remotefs",
+	557:   "openvms-sysipc",
+	558:   "sdnskmp",
+	559:   "teedtap",
+	560:   "rmonitor",
+	561:   "monitor",
+	562:   "chshell",
+	563:   "nntps",
+	564:   "9pfs",
+	565:   "whoami",
+	566:   "streettalk",
+	567:   "banyan-rpc",
+	568:   "ms-shuttle",
+	569:   "ms-rome",
+	570:   "meter",
+	571:   "meter",
+	572:   "sonar",
+	573:   "banyan-vip",
+	574:   "ftp-agent",
+	575:   "vemmi",
+	576:   "ipcd",
+	577:   "vnas",
+	578:   "ipdd",
+	579:   "decbsrv",
+	580:   "sntp-heartbeat",
+	581:   "bdp",
+	582:   "scc-security",
+	583:   "philips-vc",
+	584:   "keyserver",
+	586:   "password-chg",
+	587:   "submission",
+	588:   "cal",
+	589:   "eyelink",
+	590:   "tns-cml",
+	591:   "http-alt",
+	592:   "eudora-set",
+	593:   "http-rpc-epmap",
+	594:   "tpip",
+	595:   "cab-protocol",
+	596:   "smsd",
+	597:   "ptcnameservice",
+	598:   "sco-websrvrmg3",
+	599:   "acp",
+	600:   "ipcserver",
+	601:   "syslog-conn",
+	602:   "xmlrpc-beep",
+	603:   "idxp",
+	604:   "tunnel",
+	605:   "soap-beep",
+	606:   "urm",
+	607:   "nqs",
+	608:   "sift-uft",
+	609:   "npmp-trap",
+	610:   "npmp-local",
+	611:   "npmp-gui",
+	612:   "hmmp-ind",
+	613:   "hmmp-op",
+	614:   "sshell",
+	615:   "sco-inetmgr",
+	616:   "sco-sysmgr",
+	617:   "sco-dtmgr",
+	618:   "dei-icda",
+	619:   "compaq-evm",
+	620:   "sco-websrvrmgr",
+	621:   "escp-ip",
+	622:   "collaborator",
+	623:   "oob-ws-http",
+	624:   "cryptoadmin",
+	625:   "dec-dlm",
+	626:   "asia",
+	627:   "passgo-tivoli",
+	628:   "qmqp",
+	629:   "3com-amp3",
+	630:   "rda",
+	631:   "ipp",
+	632:   "bmpp",
+	633:   "servstat",
+	634:   "ginad",
+	635:   "rlzdbase",
+	636:   "ldaps",
+	637:   "lanserver",
+	638:   "mcns-sec",
+	639:   "msdp",
+	640:   "entrust-sps",
+	641:   "repcmd",
+	642:   "esro-emsdp",
+	643:   "sanity",
+	644:   "dwr",
+	645:   "pssc",
+	646:   "ldp",
+	647:   "dhcp-failover",
+	648:   "rrp",
+	649:   "cadview-3d",
+	650:   "obex",
+	651:   "ieee-mms",
+	652:   "hello-port",
+	653:   "repscmd",
+	654:   "aodv",
+	655:   "tinc",
+	656:   "spmp",
+	657:   "rmc",
+	658:   "tenfold",
+	660:   "mac-srvr-admin",
+	661:   "hap",
+	662:   "pftp",
+	663:   "purenoise",
+	664:   "oob-ws-https",
+	665:   "sun-dr",
+	666:   "mdqs",
+	667:   "disclose",
+	668:   "mecomm",
+	669:   "meregister",
+	670:   "vacdsm-sws",
+	671:   "vacdsm-app",
+	672:   "vpps-qua",
+	673:   "cimplex",
+	674:   "acap",
+	675:   "dctp",
+	676:   "vpps-via",
+	677:   "vpp",
+	678:   "ggf-ncp",
+	679:   "mrm",
+	680:   "entrust-aaas",
+	681:   "entrust-aams",
+	682:   "xfr",
+	683:   "corba-iiop",
+	684:   "corba-iiop-ssl",
+	685:   "mdc-portmapper",
+	686:   "hcp-wismar",
+	687:   "asipregistry",
+	688:   "realm-rusd",
+	689:   "nmap",
+	690:   "vatp",
+	691:   "msexch-routing",
+	692:   "hyperwave-isp",
+	693:   "connendp",
+	694:   "ha-cluster",
+	695:   "ieee-mms-ssl",
+	696:   "rushd",
+	697:   "uuidgen",
+	698:   "olsr",
+	699:   "accessnetwork",
+	700:   "epp",
+	701:   "lmp",
+	702:   "iris-beep",
+	704:   "elcsd",
+	705:   "agentx",
+	706:   "silc",
+	707:   "borland-dsj",
+	709:   "entrust-kmsh",
+	710:   "entrust-ash",
+	711:   "cisco-tdp",
+	712:   "tbrpf",
+	713:   "iris-xpc",
+	714:   "iris-xpcs",
+	715:   "iris-lwz",
+	729:   "netviewdm1",
+	730:   "netviewdm2",
+	731:   "netviewdm3",
+	741:   "netgw",
+	742:   "netrcs",
+	744:   "flexlm",
+	747:   "fujitsu-dev",
+	748:   "ris-cm",
+	749:   "kerberos-adm",
+	750:   "rfile",
+	751:   "pump",
+	752:   "qrh",
+	753:   "rrh",
+	754:   "tell",
+	758:   "nlogin",
+	759:   "con",
+	760:   "ns",
+	761:   "rxe",
+	762:   "quotad",
+	763:   "cycleserv",
+	764:   "omserv",
+	765:   "webster",
+	767:   "phonebook",
+	769:   "vid",
+	770:   "cadlock",
+	771:   "rtip",
+	772:   "cycleserv2",
+	773:   "submit",
+	774:   "rpasswd",
+	775:   "entomb",
+	776:   "wpages",
+	777:   "multiling-http",
+	780:   "wpgs",
+	800:   "mdbs-daemon",
+	801:   "device",
+	802:   "mbap-s",
+	810:   "fcp-udp",
+	828:   "itm-mcell-s",
+	829:   "pkix-3-ca-ra",
+	830:   "netconf-ssh",
+	831:   "netconf-beep",
+	832:   "netconfsoaphttp",
+	833:   "netconfsoapbeep",
+	847:   "dhcp-failover2",
+	848:   "gdoi",
+	853:   "domain-s",
+	854:   "dlep",
+	860:   "iscsi",
+	861:   "owamp-control",
+	862:   "twamp-control",
+	873:   "rsync",
+	886:   "iclcnet-locate",
+	887:   "iclcnet-svinfo",
+	888:   "accessbuilder",
+	900:   "omginitialrefs",
+	901:   "smpnameres",
+	902:   "ideafarm-door",
+	903:   "ideafarm-panic",
+	910:   "kink",
+	911:   "xact-backup",
+	912:   "apex-mesh",
+	913:   "apex-edge",
+	953:   "rndc",
+	989:   "ftps-data",
+	990:   "ftps",
+	991:   "nas",
+	992:   "telnets",
+	993:   "imaps",
+	995:   "pop3s",
+	996:   "vsinet",
+	997:   "maitrd",
+	998:   "busboy",
+	999:   "garcon",
+	1000:  "cadlock2",
+	1001:  "webpush",
+	1010:  "surf",
+	1021:  "exp1",
+	1022:  "exp2",
+	1025:  "blackjack",
+	1026:  "cap",
+	1029:  "solid-mux",
+	1033:  "netinfo-local",
+	1034:  "activesync",
+	1035:  "mxxrlogin",
+	1036:  "nsstp",
+	1037:  "ams",
+	1038:  "mtqp",
+	1039:  "sbl",
+	1040:  "netarx",
+	1041:  "danf-ak2",
+	1042:  "afrog",
+	1043:  "boinc-client",
+	1044:  "dcutility",
+	1045:  "fpitp",
+	1046:  "wfremotertm",
+	1047:  "neod1",
+	1048:  "neod2",
+	1049:  "td-postman",
+	1050:  "cma",
+	1051:  "optima-vnet",
+	1052:  "ddt",
+	1053:  "remote-as",
+	1054:  "brvread",
+	1055:  "ansyslmd",
+	1056:  "vfo",
+	1057:  "startron",
+	1058:  "nim",
+	1059:  "nimreg",
+	1060:  "polestar",
+	1061:  "kiosk",
+	1062:  "veracity",
+	1063:  "kyoceranetdev",
+	1064:  "jstel",
+	1065:  "syscomlan",
+	1066:  "fpo-fns",
+	1067:  "instl-boots",
+	1068:  "instl-bootc",
+	1069:  "cognex-insight",
+	1070:  "gmrupdateserv",
+	1071:  "bsquare-voip",
+	1072:  "cardax",
+	1073:  "bridgecontrol",
+	1074:  "warmspotMgmt",
+	1075:  "rdrmshc",
+	1076:  "dab-sti-c",
+	1077:  "imgames",
+	1078:  "avocent-proxy",
+	1079:  "asprovatalk",
+	1080:  "socks",
+	1081:  "pvuniwien",
+	1082:  "amt-esd-prot",
+	1083:  "ansoft-lm-1",
+	1084:  "ansoft-lm-2",
+	1085:  "webobjects",
+	1086:  "cplscrambler-lg",
+	1087:  "cplscrambler-in",
+	1088:  "cplscrambler-al",
+	1089:  "ff-annunc",
+	1090:  "ff-fms",
+	1091:  "ff-sm",
+	1092:  "obrpd",
+	1093:  "proofd",
+	1094:  "rootd",
+	1095:  "nicelink",
+	1096:  "cnrprotocol",
+	1097:  "sunclustermgr",
+	1098:  "rmiactivation",
+	1099:  "rmiregistry",
+	1100:  "mctp",
+	1101:  "pt2-discover",
+	1102:  "adobeserver-1",
+	1103:  "adobeserver-2",
+	1104:  "xrl",
+	1105:  "ftranhc",
+	1106:  "isoipsigport-1",
+	1107:  "isoipsigport-2",
+	1108:  "ratio-adp",
+	1110:  "webadmstart",
+	1111:  "lmsocialserver",
+	1112:  "icp",
+	1113:  "ltp-deepspace",
+	1114:  "mini-sql",
+	1115:  "ardus-trns",
+	1116:  "ardus-cntl",
+	1117:  "ardus-mtrns",
+	1118:  "sacred",
+	1119:  "bnetgame",
+	1120:  "bnetfile",
+	1121:  "rmpp",
+	1122:  "availant-mgr",
+	1123:  "murray",
+	1124:  "hpvmmcontrol",
+	1125:  "hpvmmagent",
+	1126:  "hpvmmdata",
+	1127:  "kwdb-commn",
+	1128:  "saphostctrl",
+	1129:  "saphostctrls",
+	1130:  "casp",
+	1131:  "caspssl",
+	1132:  "kvm-via-ip",
+	1133:  "dfn",
+	1134:  "aplx",
+	1135:  "omnivision",
+	1136:  "hhb-gateway",
+	1137:  "trim",
+	1138:  "encrypted-admin",
+	1139:  "evm",
+	1140:  "autonoc",
+	1141:  "mxomss",
+	1142:  "edtools",
+	1143:  "imyx",
+	1144:  "fuscript",
+	1145:  "x9-icue",
+	1146:  "audit-transfer",
+	1147:  "capioverlan",
+	1148:  "elfiq-repl",
+	1149:  "bvtsonar",
+	1150:  "blaze",
+	1151:  "unizensus",
+	1152:  "winpoplanmess",
+	1153:  "c1222-acse",
+	1154:  "resacommunity",
+	1155:  "nfa",
+	1156:  "iascontrol-oms",
+	1157:  "iascontrol",
+	1158:  "dbcontrol-oms",
+	1159:  "oracle-oms",
+	1160:  "olsv",
+	1161:  "health-polling",
+	1162:  "health-trap",
+	1163:  "sddp",
+	1164:  "qsm-proxy",
+	1165:  "qsm-gui",
+	1166:  "qsm-remote",
+	1167:  "cisco-ipsla",
+	1168:  "vchat",
+	1169:  "tripwire",
+	1170:  "atc-lm",
+	1171:  "atc-appserver",
+	1172:  "dnap",
+	1173:  "d-cinema-rrp",
+	1174:  "fnet-remote-ui",
+	1175:  "dossier",
+	1176:  "indigo-server",
+	1177:  "dkmessenger",
+	1178:  "sgi-storman",
+	1179:  "b2n",
+	1180:  "mc-client",
+	1181:  "3comnetman",
+	1182:  "accelenet",
+	1183:  "llsurfup-http",
+	1184:  "llsurfup-https",
+	1185:  "catchpole",
+	1186:  "mysql-cluster",
+	1187:  "alias",
+	1188:  "hp-webadmin",
+	1189:  "unet",
+	1190:  "commlinx-avl",
+	1191:  "gpfs",
+	1192:  "caids-sensor",
+	1193:  "fiveacross",
+	1194:  "openvpn",
+	1195:  "rsf-1",
+	1196:  "netmagic",
+	1197:  "carrius-rshell",
+	1198:  "cajo-discovery",
+	1199:  "dmidi",
+	1200:  "scol",
+	1201:  "nucleus-sand",
+	1202:  "caiccipc",
+	1203:  "ssslic-mgr",
+	1204:  "ssslog-mgr",
+	1205:  "accord-mgc",
+	1206:  "anthony-data",
+	1207:  "metasage",
+	1208:  "seagull-ais",
+	1209:  "ipcd3",
+	1210:  "eoss",
+	1211:  "groove-dpp",
+	1212:  "lupa",
+	1213:  "mpc-lifenet",
+	1214:  "kazaa",
+	1215:  "scanstat-1",
+	1216:  "etebac5",
+	1217:  "hpss-ndapi",
+	1218:  "aeroflight-ads",
+	1219:  "aeroflight-ret",
+	1220:  "qt-serveradmin",
+	1221:  "sweetware-apps",
+	1222:  "nerv",
+	1223:  "tgp",
+	1224:  "vpnz",
+	1225:  "slinkysearch",
+	1226:  "stgxfws",
+	1227:  "dns2go",
+	1228:  "florence",
+	1229:  "zented",
+	1230:  "periscope",
+	1231:  "menandmice-lpm",
+	1232:  "first-defense",
+	1233:  "univ-appserver",
+	1234:  "search-agent",
+	1235:  "mosaicsyssvc1",
+	1236:  "bvcontrol",
+	1237:  "tsdos390",
+	1238:  "hacl-qs",
+	1239:  "nmsd",
+	1240:  "instantia",
+	1241:  "nessus",
+	1242:  "nmasoverip",
+	1243:  "serialgateway",
+	1244:  "isbconference1",
+	1245:  "isbconference2",
+	1246:  "payrouter",
+	1247:  "visionpyramid",
+	1248:  "hermes",
+	1249:  "mesavistaco",
+	1250:  "swldy-sias",
+	1251:  "servergraph",
+	1252:  "bspne-pcc",
+	1253:  "q55-pcc",
+	1254:  "de-noc",
+	1255:  "de-cache-query",
+	1256:  "de-server",
+	1257:  "shockwave2",
+	1258:  "opennl",
+	1259:  "opennl-voice",
+	1260:  "ibm-ssd",
+	1261:  "mpshrsv",
+	1262:  "qnts-orb",
+	1263:  "dka",
+	1264:  "prat",
+	1265:  "dssiapi",
+	1266:  "dellpwrappks",
+	1267:  "epc",
+	1268:  "propel-msgsys",
+	1269:  "watilapp",
+	1270:  "opsmgr",
+	1271:  "excw",
+	1272:  "cspmlockmgr",
+	1273:  "emc-gateway",
+	1274:  "t1distproc",
+	1275:  "ivcollector",
+	1277:  "miva-mqs",
+	1278:  "dellwebadmin-1",
+	1279:  "dellwebadmin-2",
+	1280:  "pictrography",
+	1281:  "healthd",
+	1282:  "emperion",
+	1283:  "productinfo",
+	1284:  "iee-qfx",
+	1285:  "neoiface",
+	1286:  "netuitive",
+	1287:  "routematch",
+	1288:  "navbuddy",
+	1289:  "jwalkserver",
+	1290:  "winjaserver",
+	1291:  "seagulllms",
+	1292:  "dsdn",
+	1293:  "pkt-krb-ipsec",
+	1294:  "cmmdriver",
+	1295:  "ehtp",
+	1296:  "dproxy",
+	1297:  "sdproxy",
+	1298:  "lpcp",
+	1299:  "hp-sci",
+	1300:  "h323hostcallsc",
+	1301:  "ci3-software-1",
+	1302:  "ci3-software-2",
+	1303:  "sftsrv",
+	1304:  "boomerang",
+	1305:  "pe-mike",
+	1306:  "re-conn-proto",
+	1307:  "pacmand",
+	1308:  "odsi",
+	1309:  "jtag-server",
+	1310:  "husky",
+	1311:  "rxmon",
+	1312:  "sti-envision",
+	1313:  "bmc-patroldb",
+	1314:  "pdps",
+	1315:  "els",
+	1316:  "exbit-escp",
+	1317:  "vrts-ipcserver",
+	1318:  "krb5gatekeeper",
+	1319:  "amx-icsp",
+	1320:  "amx-axbnet",
+	1321:  "pip",
+	1322:  "novation",
+	1323:  "brcd",
+	1324:  "delta-mcp",
+	1325:  "dx-instrument",
+	1326:  "wimsic",
+	1327:  "ultrex",
+	1328:  "ewall",
+	1329:  "netdb-export",
+	1330:  "streetperfect",
+	1331:  "intersan",
+	1332:  "pcia-rxp-b",
+	1333:  "passwrd-policy",
+	1334:  "writesrv",
+	1335:  "digital-notary",
+	1336:  "ischat",
+	1337:  "menandmice-dns",
+	1338:  "wmc-log-svc",
+	1339:  "kjtsiteserver",
+	1340:  "naap",
+	1341:  "qubes",
+	1342:  "esbroker",
+	1343:  "re101",
+	1344:  "icap",
+	1345:  "vpjp",
+	1346:  "alta-ana-lm",
+	1347:  "bbn-mmc",
+	1348:  "bbn-mmx",
+	1349:  "sbook",
+	1350:  "editbench",
+	1351:  "equationbuilder",
+	1352:  "lotusnote",
+	1353:  "relief",
+	1354:  "XSIP-network",
+	1355:  "intuitive-edge",
+	1356:  "cuillamartin",
+	1357:  "pegboard",
+	1358:  "connlcli",
+	1359:  "ftsrv",
+	1360:  "mimer",
+	1361:  "linx",
+	1362:  "timeflies",
+	1363:  "ndm-requester",
+	1364:  "ndm-server",
+	1365:  "adapt-sna",
+	1366:  "netware-csp",
+	1367:  "dcs",
+	1368:  "screencast",
+	1369:  "gv-us",
+	1370:  "us-gv",
+	1371:  "fc-cli",
+	1372:  "fc-ser",
+	1373:  "chromagrafx",
+	1374:  "molly",
+	1375:  "bytex",
+	1376:  "ibm-pps",
+	1377:  "cichlid",
+	1378:  "elan",
+	1379:  "dbreporter",
+	1380:  "telesis-licman",
+	1381:  "apple-licman",
+	1382:  "udt-os",
+	1383:  "gwha",
+	1384:  "os-licman",
+	1385:  "atex-elmd",
+	1386:  "checksum",
+	1387:  "cadsi-lm",
+	1388:  "objective-dbc",
+	1389:  "iclpv-dm",
+	1390:  "iclpv-sc",
+	1391:  "iclpv-sas",
+	1392:  "iclpv-pm",
+	1393:  "iclpv-nls",
+	1394:  "iclpv-nlc",
+	1395:  "iclpv-wsm",
+	1396:  "dvl-activemail",
+	1397:  "audio-activmail",
+	1398:  "video-activmail",
+	1399:  "cadkey-licman",
+	1400:  "cadkey-tablet",
+	1401:  "goldleaf-licman",
+	1402:  "prm-sm-np",
+	1403:  "prm-nm-np",
+	1404:  "igi-lm",
+	1405:  "ibm-res",
+	1406:  "netlabs-lm",
+	1407:  "tibet-server",
+	1408:  "sophia-lm",
+	1409:  "here-lm",
+	1410:  "hiq",
+	1411:  "af",
+	1412:  "innosys",
+	1413:  "innosys-acl",
+	1414:  "ibm-mqseries",
+	1415:  "dbstar",
+	1416:  "novell-lu6-2",
+	1417:  "timbuktu-srv1",
+	1418:  "timbuktu-srv2",
+	1419:  "timbuktu-srv3",
+	1420:  "timbuktu-srv4",
+	1421:  "gandalf-lm",
+	1422:  "autodesk-lm",
+	1423:  "essbase",
+	1424:  "hybrid",
+	1425:  "zion-lm",
+	1426:  "sais",
+	1427:  "mloadd",
+	1428:  "informatik-lm",
+	1429:  "nms",
+	1430:  "tpdu",
+	1431:  "rgtp",
+	1432:  "blueberry-lm",
+	1433:  "ms-sql-s",
+	1434:  "ms-sql-m",
+	1435:  "ibm-cics",
+	1436:  "saism",
+	1437:  "tabula",
+	1438:  "eicon-server",
+	1439:  "eicon-x25",
+	1440:  "eicon-slp",
+	1441:  "cadis-1",
+	1442:  "cadis-2",
+	1443:  "ies-lm",
+	1444:  "marcam-lm",
+	1445:  "proxima-lm",
+	1446:  "ora-lm",
+	1447:  "apri-lm",
+	1448:  "oc-lm",
+	1449:  "peport",
+	1450:  "dwf",
+	1451:  "infoman",
+	1452:  "gtegsc-lm",
+	1453:  "genie-lm",
+	1454:  "interhdl-elmd",
+	1455:  "esl-lm",
+	1456:  "dca",
+	1457:  "valisys-lm",
+	1458:  "nrcabq-lm",
+	1459:  "proshare1",
+	1460:  "proshare2",
+	1461:  "ibm-wrless-lan",
+	1462:  "world-lm",
+	1463:  "nucleus",
+	1464:  "msl-lmd",
+	1465:  "pipes",
+	1466:  "oceansoft-lm",
+	1467:  "csdmbase",
+	1468:  "csdm",
+	1469:  "aal-lm",
+	1470:  "uaiact",
+	1471:  "csdmbase",
+	1472:  "csdm",
+	1473:  "openmath",
+	1474:  "telefinder",
+	1475:  "taligent-lm",
+	1476:  "clvm-cfg",
+	1477:  "ms-sna-server",
+	1478:  "ms-sna-base",
+	1479:  "dberegister",
+	1480:  "pacerforum",
+	1481:  "airs",
+	1482:  "miteksys-lm",
+	1483:  "afs",
+	1484:  "confluent",
+	1485:  "lansource",
+	1486:  "nms-topo-serv",
+	1487:  "localinfosrvr",
+	1488:  "docstor",
+	1489:  "dmdocbroker",
+	1490:  "insitu-conf",
+	1492:  "stone-design-1",
+	1493:  "netmap-lm",
+	1494:  "ica",
+	1495:  "cvc",
+	1496:  "liberty-lm",
+	1497:  "rfx-lm",
+	1498:  "sybase-sqlany",
+	1499:  "fhc",
+	1500:  "vlsi-lm",
+	1501:  "saiscm",
+	1502:  "shivadiscovery",
+	1503:  "imtc-mcs",
+	1504:  "evb-elm",
+	1505:  "funkproxy",
+	1506:  "utcd",
+	1507:  "symplex",
+	1508:  "diagmond",
+	1509:  "robcad-lm",
+	1510:  "mvx-lm",
+	1511:  "3l-l1",
+	1512:  "wins",
+	1513:  "fujitsu-dtc",
+	1514:  "fujitsu-dtcns",
+	1515:  "ifor-protocol",
+	1516:  "vpad",
+	1517:  "vpac",
+	1518:  "vpvd",
+	1519:  "vpvc",
+	1520:  "atm-zip-office",
+	1521:  "ncube-lm",
+	1522:  "ricardo-lm",
+	1523:  "cichild-lm",
+	1524:  "ingreslock",
+	1525:  "orasrv",
+	1526:  "pdap-np",
+	1527:  "tlisrv",
+	1529:  "coauthor",
+	1530:  "rap-service",
+	1531:  "rap-listen",
+	1532:  "miroconnect",
+	1533:  "virtual-places",
+	1534:  "micromuse-lm",
+	1535:  "ampr-info",
+	1536:  "ampr-inter",
+	1537:  "sdsc-lm",
+	1538:  "3ds-lm",
+	1539:  "intellistor-lm",
+	1540:  "rds",
+	1541:  "rds2",
+	1542:  "gridgen-elmd",
+	1543:  "simba-cs",
+	1544:  "aspeclmd",
+	1545:  "vistium-share",
+	1546:  "abbaccuray",
+	1547:  "laplink",
+	1548:  "axon-lm",
+	1549:  "shivahose",
+	1550:  "3m-image-lm",
+	1551:  "hecmtl-db",
+	1552:  "pciarray",
+	1553:  "sna-cs",
+	1554:  "caci-lm",
+	1555:  "livelan",
+	1556:  "veritas-pbx",
+	1557:  "arbortext-lm",
+	1558:  "xingmpeg",
+	1559:  "web2host",
+	1560:  "asci-val",
+	1561:  "facilityview",
+	1562:  "pconnectmgr",
+	1563:  "cadabra-lm",
+	1564:  "pay-per-view",
+	1565:  "winddlb",
+	1566:  "corelvideo",
+	1567:  "jlicelmd",
+	1568:  "tsspmap",
+	1569:  "ets",
+	1570:  "orbixd",
+	1571:  "rdb-dbs-disp",
+	1572:  "chip-lm",
+	1573:  "itscomm-ns",
+	1574:  "mvel-lm",
+	1575:  "oraclenames",
+	1576:  "moldflow-lm",
+	1577:  "hypercube-lm",
+	1578:  "jacobus-lm",
+	1579:  "ioc-sea-lm",
+	1580:  "tn-tl-r1",
+	1581:  "mil-2045-47001",
+	1582:  "msims",
+	1583:  "simbaexpress",
+	1584:  "tn-tl-fd2",
+	1585:  "intv",
+	1586:  "ibm-abtact",
+	1587:  "pra-elmd",
+	1588:  "triquest-lm",
+	1589:  "vqp",
+	1590:  "gemini-lm",
+	1591:  "ncpm-pm",
+	1592:  "commonspace",
+	1593:  "mainsoft-lm",
+	1594:  "sixtrak",
+	1595:  "radio",
+	1596:  "radio-sm",
+	1597:  "orbplus-iiop",
+	1598:  "picknfs",
+	1599:  "simbaservices",
+	1600:  "issd",
+	1601:  "aas",
+	1602:  "inspect",
+	1603:  "picodbc",
+	1604:  "icabrowser",
+	1605:  "slp",
+	1606:  "slm-api",
+	1607:  "stt",
+	1608:  "smart-lm",
+	1609:  "isysg-lm",
+	1610:  "taurus-wh",
+	1611:  "ill",
+	1612:  "netbill-trans",
+	1613:  "netbill-keyrep",
+	1614:  "netbill-cred",
+	1615:  "netbill-auth",
+	1616:  "netbill-prod",
+	1617:  "nimrod-agent",
+	1618:  "skytelnet",
+	1619:  "xs-openstorage",
+	1620:  "faxportwinport",
+	1621:  "softdataphone",
+	1622:  "ontime",
+	1623:  "jaleosnd",
+	1624:  "udp-sr-port",
+	1625:  "svs-omagent",
+	1626:  "shockwave",
+	1627:  "t128-gateway",
+	1628:  "lontalk-norm",
+	1629:  "lontalk-urgnt",
+	1630:  "oraclenet8cman",
+	1631:  "visitview",
+	1632:  "pammratc",
+	1633:  "pammrpc",
+	1634:  "loaprobe",
+	1635:  "edb-server1",
+	1636:  "isdc",
+	1637:  "islc",
+	1638:  "ismc",
+	1639:  "cert-initiator",
+	1640:  "cert-responder",
+	1641:  "invision",
+	1642:  "isis-am",
+	1643:  "isis-ambc",
+	1644:  "saiseh",
+	1645:  "sightline",
+	1646:  "sa-msg-port",
+	1647:  "rsap",
+	1648:  "concurrent-lm",
+	1649:  "kermit",
+	1650:  "nkd",
+	1651:  "shiva-confsrvr",
+	1652:  "xnmp",
+	1653:  "alphatech-lm",
+	1654:  "stargatealerts",
+	1655:  "dec-mbadmin",
+	1656:  "dec-mbadmin-h",
+	1657:  "fujitsu-mmpdc",
+	1658:  "sixnetudr",
+	1659:  "sg-lm",
+	1660:  "skip-mc-gikreq",
+	1661:  "netview-aix-1",
+	1662:  "netview-aix-2",
+	1663:  "netview-aix-3",
+	1664:  "netview-aix-4",
+	1665:  "netview-aix-5",
+	1666:  "netview-aix-6",
+	1667:  "netview-aix-7",
+	1668:  "netview-aix-8",
+	1669:  "netview-aix-9",
+	1670:  "netview-aix-10",
+	1671:  "netview-aix-11",
+	1672:  "netview-aix-12",
+	1673:  "proshare-mc-1",
+	1674:  "proshare-mc-2",
+	1675:  "pdp",
+	1676:  "netcomm1",
+	1677:  "groupwise",
+	1678:  "prolink",
+	1679:  "darcorp-lm",
+	1680:  "microcom-sbp",
+	1681:  "sd-elmd",
+	1682:  "lanyon-lantern",
+	1683:  "ncpm-hip",
+	1684:  "snaresecure",
+	1685:  "n2nremote",
+	1686:  "cvmon",
+	1687:  "nsjtp-ctrl",
+	1688:  "nsjtp-data",
+	1689:  "firefox",
+	1690:  "ng-umds",
+	1691:  "empire-empuma",
+	1692:  "sstsys-lm",
+	1693:  "rrirtr",
+	1694:  "rrimwm",
+	1695:  "rrilwm",
+	1696:  "rrifmm",
+	1697:  "rrisat",
+	1698:  "rsvp-encap-1",
+	1699:  "rsvp-encap-2",
+	1700:  "mps-raft",
+	1701:  "l2f",
+	1702:  "deskshare",
+	1703:  "hb-engine",
+	1704:  "bcs-broker",
+	1705:  "slingshot",
+	1706:  "jetform",
+	1707:  "vdmplay",
+	1708:  "gat-lmd",
+	1709:  "centra",
+	1710:  "impera",
+	1711:  "pptconference",
+	1712:  "registrar",
+	1713:  "conferencetalk",
+	1714:  "sesi-lm",
+	1715:  "houdini-lm",
+	1716:  "xmsg",
+	1717:  "fj-hdnet",
+	1718:  "h323gatedisc",
+	1719:  "h323gatestat",
+	1720:  "h323hostcall",
+	1721:  "caicci",
+	1722:  "hks-lm",
+	1723:  "pptp",
+	1724:  "csbphonemaster",
+	1725:  "iden-ralp",
+	1726:  "iberiagames",
+	1727:  "winddx",
+	1728:  "telindus",
+	1729:  "citynl",
+	1730:  "roketz",
+	1731:  "msiccp",
+	1732:  "proxim",
+	1733:  "siipat",
+	1734:  "cambertx-lm",
+	1735:  "privatechat",
+	1736:  "street-stream",
+	1737:  "ultimad",
+	1738:  "gamegen1",
+	1739:  "webaccess",
+	1740:  "encore",
+	1741:  "cisco-net-mgmt",
+	1742:  "3Com-nsd",
+	1743:  "cinegrfx-lm",
+	1744:  "ncpm-ft",
+	1745:  "remote-winsock",
+	1746:  "ftrapid-1",
+	1747:  "ftrapid-2",
+	1748:  "oracle-em1",
+	1749:  "aspen-services",
+	1750:  "sslp",
+	1751:  "swiftnet",
+	1752:  "lofr-lm",
+	1753:  "predatar-comms",
+	1754:  "oracle-em2",
+	1755:  "ms-streaming",
+	1756:  "capfast-lmd",
+	1757:  "cnhrp",
+	1758:  "tftp-mcast",
+	1759:  "spss-lm",
+	1760:  "www-ldap-gw",
+	1761:  "cft-0",
+	1762:  "cft-1",
+	1763:  "cft-2",
+	1764:  "cft-3",
+	1765:  "cft-4",
+	1766:  "cft-5",
+	1767:  "cft-6",
+	1768:  "cft-7",
+	1769:  "bmc-net-adm",
+	1770:  "bmc-net-svc",
+	1771:  "vaultbase",
+	1772:  "essweb-gw",
+	1773:  "kmscontrol",
+	1774:  "global-dtserv",
+	1775:  "vdab",
+	1776:  "femis",
+	1777:  "powerguardian",
+	1778:  "prodigy-intrnet",
+	1779:  "pharmasoft",
+	1780:  "dpkeyserv",
+	1781:  "answersoft-lm",
+	1782:  "hp-hcip",
+	1784:  "finle-lm",
+	1785:  "windlm",
+	1786:  "funk-logger",
+	1787:  "funk-license",
+	1788:  "psmond",
+	1789:  "hello",
+	1790:  "nmsp",
+	1791:  "ea1",
+	1792:  "ibm-dt-2",
+	1793:  "rsc-robot",
+	1794:  "cera-bcm",
+	1795:  "dpi-proxy",
+	1796:  "vocaltec-admin",
+	1797:  "uma",
+	1798:  "etp",
+	1799:  "netrisk",
+	1800:  "ansys-lm",
+	1801:  "msmq",
+	1802:  "concomp1",
+	1803:  "hp-hcip-gwy",
+	1804:  "enl",
+	1805:  "enl-name",
+	1806:  "musiconline",
+	1807:  "fhsp",
+	1808:  "oracle-vp2",
+	1809:  "oracle-vp1",
+	1810:  "jerand-lm",
+	1811:  "scientia-sdb",
+	1812:  "radius",
+	1813:  "radius-acct",
+	1814:  "tdp-suite",
+	1815:  "mmpft",
+	1816:  "harp",
+	1817:  "rkb-oscs",
+	1818:  "etftp",
+	1819:  "plato-lm",
+	1820:  "mcagent",
+	1821:  "donnyworld",
+	1822:  "es-elmd",
+	1823:  "unisys-lm",
+	1824:  "metrics-pas",
+	1825:  "direcpc-video",
+	1826:  "ardt",
+	1827:  "asi",
+	1828:  "itm-mcell-u",
+	1829:  "optika-emedia",
+	1830:  "net8-cman",
+	1831:  "myrtle",
+	1832:  "tht-treasure",
+	1833:  "udpradio",
+	1834:  "ardusuni",
+	1835:  "ardusmul",
+	1836:  "ste-smsc",
+	1837:  "csoft1",
+	1838:  "talnet",
+	1839:  "netopia-vo1",
+	1840:  "netopia-vo2",
+	1841:  "netopia-vo3",
+	1842:  "netopia-vo4",
+	1843:  "netopia-vo5",
+	1844:  "direcpc-dll",
+	1845:  "altalink",
+	1846:  "tunstall-pnc",
+	1847:  "slp-notify",
+	1848:  "fjdocdist",
+	1849:  "alpha-sms",
+	1850:  "gsi",
+	1851:  "ctcd",
+	1852:  "virtual-time",
+	1853:  "vids-avtp",
+	1854:  "buddy-draw",
+	1855:  "fiorano-rtrsvc",
+	1856:  "fiorano-msgsvc",
+	1857:  "datacaptor",
+	1858:  "privateark",
+	1859:  "gammafetchsvr",
+	1860:  "sunscalar-svc",
+	1861:  "lecroy-vicp",
+	1862:  "mysql-cm-agent",
+	1863:  "msnp",
+	1864:  "paradym-31port",
+	1865:  "entp",
+	1866:  "swrmi",
+	1867:  "udrive",
+	1868:  "viziblebrowser",
+	1869:  "transact",
+	1870:  "sunscalar-dns",
+	1871:  "canocentral0",
+	1872:  "canocentral1",
+	1873:  "fjmpjps",
+	1874:  "fjswapsnp",
+	1875:  "westell-stats",
+	1876:  "ewcappsrv",
+	1877:  "hp-webqosdb",
+	1878:  "drmsmc",
+	1879:  "nettgain-nms",
+	1880:  "vsat-control",
+	1881:  "ibm-mqseries2",
+	1882:  "ecsqdmn",
+	1883:  "mqtt",
+	1884:  "idmaps",
+	1885:  "vrtstrapserver",
+	1886:  "leoip",
+	1887:  "filex-lport",
+	1888:  "ncconfig",
+	1889:  "unify-adapter",
+	1890:  "wilkenlistener",
+	1891:  "childkey-notif",
+	1892:  "childkey-ctrl",
+	1893:  "elad",
+	1894:  "o2server-port",
+	1896:  "b-novative-ls",
+	1897:  "metaagent",
+	1898:  "cymtec-port",
+	1899:  "mc2studios",
+	1900:  "ssdp",
+	1901:  "fjicl-tep-a",
+	1902:  "fjicl-tep-b",
+	1903:  "linkname",
+	1904:  "fjicl-tep-c",
+	1905:  "sugp",
+	1906:  "tpmd",
+	1907:  "intrastar",
+	1908:  "dawn",
+	1909:  "global-wlink",
+	1910:  "ultrabac",
+	1911:  "mtp",
+	1912:  "rhp-iibp",
+	1913:  "armadp",
+	1914:  "elm-momentum",
+	1915:  "facelink",
+	1916:  "persona",
+	1917:  "noagent",
+	1918:  "can-nds",
+	1919:  "can-dch",
+	1920:  "can-ferret",
+	1921:  "noadmin",
+	1922:  "tapestry",
+	1923:  "spice",
+	1924:  "xiip",
+	1925:  "discovery-port",
+	1926:  "egs",
+	1927:  "videte-cipc",
+	1928:  "emsd-port",
+	1929:  "bandwiz-system",
+	1930:  "driveappserver",
+	1931:  "amdsched",
+	1932:  "ctt-broker",
+	1933:  "xmapi",
+	1934:  "xaapi",
+	1935:  "macromedia-fcs",
+	1936:  "jetcmeserver",
+	1937:  "jwserver",
+	1938:  "jwclient",
+	1939:  "jvserver",
+	1940:  "jvclient",
+	1941:  "dic-aida",
+	1942:  "res",
+	1943:  "beeyond-media",
+	1944:  "close-combat",
+	1945:  "dialogic-elmd",
+	1946:  "tekpls",
+	1947:  "sentinelsrm",
+	1948:  "eye2eye",
+	1949:  "ismaeasdaqlive",
+	1950:  "ismaeasdaqtest",
+	1951:  "bcs-lmserver",
+	1952:  "mpnjsc",
+	1953:  "rapidbase",
+	1954:  "abr-api",
+	1955:  "abr-secure",
+	1956:  "vrtl-vmf-ds",
+	1957:  "unix-status",
+	1958:  "dxadmind",
+	1959:  "simp-all",
+	1960:  "nasmanager",
+	1961:  "bts-appserver",
+	1962:  "biap-mp",
+	1963:  "webmachine",
+	1964:  "solid-e-engine",
+	1965:  "tivoli-npm",
+	1966:  "slush",
+	1967:  "sns-quote",
+	1968:  "lipsinc",
+	1969:  "lipsinc1",
+	1970:  "netop-rc",
+	1971:  "netop-school",
+	1972:  "intersys-cache",
+	1973:  "dlsrap",
+	1974:  "drp",
+	1975:  "tcoflashagent",
+	1976:  "tcoregagent",
+	1977:  "tcoaddressbook",
+	1978:  "unisql",
+	1979:  "unisql-java",
+	1980:  "pearldoc-xact",
+	1981:  "p2pq",
+	1982:  "estamp",
+	1983:  "lhtp",
+	1984:  "bb",
+	1985:  "hsrp",
+	1986:  "licensedaemon",
+	1987:  "tr-rsrb-p1",
+	1988:  "tr-rsrb-p2",
+	1989:  "tr-rsrb-p3",
+	1990:  "stun-p1",
+	1991:  "stun-p2",
+	1992:  "stun-p3",
+	1993:  "snmp-tcp-port",
+	1994:  "stun-port",
+	1995:  "perf-port",
+	1996:  "tr-rsrb-port",
+	1997:  "gdp-port",
+	1998:  "x25-svc-port",
+	1999:  "tcp-id-port",
+	2000:  "cisco-sccp",
+	2001:  "dc",
+	2002:  "globe",
+	2003:  "brutus",
+	2004:  "mailbox",
+	2005:  "berknet",
+	2006:  "invokator",
+	2007:  "dectalk",
+	2008:  "conf",
+	2009:  "news",
+	2010:  "search",
+	2011:  "raid-cc",
+	2012:  "ttyinfo",
+	2013:  "raid-am",
+	2014:  "troff",
+	2015:  "cypress",
+	2016:  "bootserver",
+	2017:  "cypress-stat",
+	2018:  "terminaldb",
+	2019:  "whosockami",
+	2020:  "xinupageserver",
+	2021:  "servexec",
+	2022:  "down",
+	2023:  "xinuexpansion3",
+	2024:  "xinuexpansion4",
+	2025:  "ellpack",
+	2026:  "scrabble",
+	2027:  "shadowserver",
+	2028:  "submitserver",
+	2029:  "hsrpv6",
+	2030:  "device2",
+	2031:  "mobrien-chat",
+	2032:  "blackboard",
+	2033:  "glogger",
+	2034:  "scoremgr",
+	2035:  "imsldoc",
+	2036:  "e-dpnet",
+	2037:  "applus",
+	2038:  "objectmanager",
+	2039:  "prizma",
+	2040:  "lam",
+	2041:  "interbase",
+	2042:  "isis",
+	2043:  "isis-bcast",
+	2044:  "rimsl",
+	2045:  "cdfunc",
+	2046:  "sdfunc",
+	2047:  "dls",
+	2048:  "dls-monitor",
+	2049:  "shilp",
+	2050:  "av-emb-config",
+	2051:  "epnsdp",
+	2052:  "clearvisn",
+	2053:  "lot105-ds-upd",
+	2054:  "weblogin",
+	2055:  "iop",
+	2056:  "omnisky",
+	2057:  "rich-cp",
+	2058:  "newwavesearch",
+	2059:  "bmc-messaging",
+	2060:  "teleniumdaemon",
+	2061:  "netmount",
+	2062:  "icg-swp",
+	2063:  "icg-bridge",
+	2064:  "icg-iprelay",
+	2065:  "dlsrpn",
+	2066:  "aura",
+	2067:  "dlswpn",
+	2068:  "avauthsrvprtcl",
+	2069:  "event-port",
+	2070:  "ah-esp-encap",
+	2071:  "acp-port",
+	2072:  "msync",
+	2073:  "gxs-data-port",
+	2074:  "vrtl-vmf-sa",
+	2075:  "newlixengine",
+	2076:  "newlixconfig",
+	2077:  "tsrmagt",
+	2078:  "tpcsrvr",
+	2079:  "idware-router",
+	2080:  "autodesk-nlm",
+	2081:  "kme-trap-port",
+	2082:  "infowave",
+	2083:  "radsec",
+	2084:  "sunclustergeo",
+	2085:  "ada-cip",
+	2086:  "gnunet",
+	2087:  "eli",
+	2088:  "ip-blf",
+	2089:  "sep",
+	2090:  "lrp",
+	2091:  "prp",
+	2092:  "descent3",
+	2093:  "nbx-cc",
+	2094:  "nbx-au",
+	2095:  "nbx-ser",
+	2096:  "nbx-dir",
+	2097:  "jetformpreview",
+	2098:  "dialog-port",
+	2099:  "h2250-annex-g",
+	2100:  "amiganetfs",
+	2101:  "rtcm-sc104",
+	2102:  "zephyr-srv",
+	2103:  "zephyr-clt",
+	2104:  "zephyr-hm",
+	2105:  "minipay",
+	2106:  "mzap",
+	2107:  "bintec-admin",
+	2108:  "comcam",
+	2109:  "ergolight",
+	2110:  "umsp",
+	2111:  "dsatp",
+	2112:  "idonix-metanet",
+	2113:  "hsl-storm",
+	2114:  "newheights",
+	2115:  "kdm",
+	2116:  "ccowcmr",
+	2117:  "mentaclient",
+	2118:  "mentaserver",
+	2119:  "gsigatekeeper",
+	2120:  "qencp",
+	2121:  "scientia-ssdb",
+	2122:  "caupc-remote",
+	2123:  "gtp-control",
+	2124:  "elatelink",
+	2125:  "lockstep",
+	2126:  "pktcable-cops",
+	2127:  "index-pc-wb",
+	2128:  "net-steward",
+	2129:  "cs-live",
+	2130:  "xds",
+	2131:  "avantageb2b",
+	2132:  "solera-epmap",
+	2133:  "zymed-zpp",
+	2134:  "avenue",
+	2135:  "gris",
+	2136:  "appworxsrv",
+	2137:  "connect",
+	2138:  "unbind-cluster",
+	2139:  "ias-auth",
+	2140:  "ias-reg",
+	2141:  "ias-admind",
+	2142:  "tdmoip",
+	2143:  "lv-jc",
+	2144:  "lv-ffx",
+	2145:  "lv-pici",
+	2146:  "lv-not",
+	2147:  "lv-auth",
+	2148:  "veritas-ucl",
+	2149:  "acptsys",
+	2150:  "dynamic3d",
+	2151:  "docent",
+	2152:  "gtp-user",
+	2153:  "ctlptc",
+	2154:  "stdptc",
+	2155:  "brdptc",
+	2156:  "trp",
+	2157:  "xnds",
+	2158:  "touchnetplus",
+	2159:  "gdbremote",
+	2160:  "apc-2160",
+	2161:  "apc-2161",
+	2162:  "navisphere",
+	2163:  "navisphere-sec",
+	2164:  "ddns-v3",
+	2165:  "x-bone-api",
+	2166:  "iwserver",
+	2167:  "raw-serial",
+	2168:  "easy-soft-mux",
+	2169:  "brain",
+	2170:  "eyetv",
+	2171:  "msfw-storage",
+	2172:  "msfw-s-storage",
+	2173:  "msfw-replica",
+	2174:  "msfw-array",
+	2175:  "airsync",
+	2176:  "rapi",
+	2177:  "qwave",
+	2178:  "bitspeer",
+	2179:  "vmrdp",
+	2180:  "mc-gt-srv",
+	2181:  "eforward",
+	2182:  "cgn-stat",
+	2183:  "cgn-config",
+	2184:  "nvd",
+	2185:  "onbase-dds",
+	2186:  "gtaua",
+	2187:  "ssmc",
+	2188:  "radware-rpm",
+	2189:  "radware-rpm-s",
+	2190:  "tivoconnect",
+	2191:  "tvbus",
+	2192:  "asdis",
+	2193:  "drwcs",
+	2197:  "mnp-exchange",
+	2198:  "onehome-remote",
+	2199:  "onehome-help",
+	2200:  "ici",
+	2201:  "ats",
+	2202:  "imtc-map",
+	2203:  "b2-runtime",
+	2204:  "b2-license",
+	2205:  "jps",
+	2206:  "hpocbus",
+	2207:  "hpssd",
+	2208:  "hpiod",
+	2209:  "rimf-ps",
+	2210:  "noaaport",
+	2211:  "emwin",
+	2212:  "leecoposserver",
+	2213:  "kali",
+	2214:  "rpi",
+	2215:  "ipcore",
+	2216:  "vtu-comms",
+	2217:  "gotodevice",
+	2218:  "bounzza",
+	2219:  "netiq-ncap",
+	2220:  "netiq",
+	2221:  "ethernet-ip-s",
+	2222:  "EtherNet-IP-1",
+	2223:  "rockwell-csp2",
+	2224:  "efi-mg",
+	2225:  "rcip-itu",
+	2226:  "di-drm",
+	2227:  "di-msg",
+	2228:  "ehome-ms",
+	2229:  "datalens",
+	2230:  "queueadm",
+	2231:  "wimaxasncp",
+	2232:  "ivs-video",
+	2233:  "infocrypt",
+	2234:  "directplay",
+	2235:  "sercomm-wlink",
+	2236:  "nani",
+	2237:  "optech-port1-lm",
+	2238:  "aviva-sna",
+	2239:  "imagequery",
+	2240:  "recipe",
+	2241:  "ivsd",
+	2242:  "foliocorp",
+	2243:  "magicom",
+	2244:  "nmsserver",
+	2245:  "hao",
+	2246:  "pc-mta-addrmap",
+	2247:  "antidotemgrsvr",
+	2248:  "ums",
+	2249:  "rfmp",
+	2250:  "remote-collab",
+	2251:  "dif-port",
+	2252:  "njenet-ssl",
+	2253:  "dtv-chan-req",
+	2254:  "seispoc",
+	2255:  "vrtp",
+	2256:  "pcc-mfp",
+	2257:  "simple-tx-rx",
+	2258:  "rcts",
+	2260:  "apc-2260",
+	2261:  "comotionmaster",
+	2262:  "comotionback",
+	2263:  "ecwcfg",
+	2264:  "apx500api-1",
+	2265:  "apx500api-2",
+	2266:  "mfserver",
+	2267:  "ontobroker",
+	2268:  "amt",
+	2269:  "mikey",
+	2270:  "starschool",
+	2271:  "mmcals",
+	2272:  "mmcal",
+	2273:  "mysql-im",
+	2274:  "pcttunnell",
+	2275:  "ibridge-data",
+	2276:  "ibridge-mgmt",
+	2277:  "bluectrlproxy",
+	2278:  "s3db",
+	2279:  "xmquery",
+	2280:  "lnvpoller",
+	2281:  "lnvconsole",
+	2282:  "lnvalarm",
+	2283:  "lnvstatus",
+	2284:  "lnvmaps",
+	2285:  "lnvmailmon",
+	2286:  "nas-metering",
+	2287:  "dna",
+	2288:  "netml",
+	2289:  "dict-lookup",
+	2290:  "sonus-logging",
+	2291:  "eapsp",
+	2292:  "mib-streaming",
+	2293:  "npdbgmngr",
+	2294:  "konshus-lm",
+	2295:  "advant-lm",
+	2296:  "theta-lm",
+	2297:  "d2k-datamover1",
+	2298:  "d2k-datamover2",
+	2299:  "pc-telecommute",
+	2300:  "cvmmon",
+	2301:  "cpq-wbem",
+	2302:  "binderysupport",
+	2303:  "proxy-gateway",
+	2304:  "attachmate-uts",
+	2305:  "mt-scaleserver",
+	2306:  "tappi-boxnet",
+	2307:  "pehelp",
+	2308:  "sdhelp",
+	2309:  "sdserver",
+	2310:  "sdclient",
+	2311:  "messageservice",
+	2312:  "wanscaler",
+	2313:  "iapp",
+	2314:  "cr-websystems",
+	2315:  "precise-sft",
+	2316:  "sent-lm",
+	2317:  "attachmate-g32",
+	2318:  "cadencecontrol",
+	2319:  "infolibria",
+	2320:  "siebel-ns",
+	2321:  "rdlap",
+	2322:  "ofsd",
+	2323:  "3d-nfsd",
+	2324:  "cosmocall",
+	2325:  "ansysli",
+	2326:  "idcp",
+	2327:  "xingcsm",
+	2328:  "netrix-sftm",
+	2329:  "nvd",
+	2330:  "tscchat",
+	2331:  "agentview",
+	2332:  "rcc-host",
+	2333:  "snapp",
+	2334:  "ace-client",
+	2335:  "ace-proxy",
+	2336:  "appleugcontrol",
+	2337:  "ideesrv",
+	2338:  "norton-lambert",
+	2339:  "3com-webview",
+	2340:  "wrs-registry",
+	2341:  "xiostatus",
+	2342:  "manage-exec",
+	2343:  "nati-logos",
+	2344:  "fcmsys",
+	2345:  "dbm",
+	2346:  "redstorm-join",
+	2347:  "redstorm-find",
+	2348:  "redstorm-info",
+	2349:  "redstorm-diag",
+	2350:  "psbserver",
+	2351:  "psrserver",
+	2352:  "pslserver",
+	2353:  "pspserver",
+	2354:  "psprserver",
+	2355:  "psdbserver",
+	2356:  "gxtelmd",
+	2357:  "unihub-server",
+	2358:  "futrix",
+	2359:  "flukeserver",
+	2360:  "nexstorindltd",
+	2361:  "tl1",
+	2362:  "digiman",
+	2363:  "mediacntrlnfsd",
+	2364:  "oi-2000",
+	2365:  "dbref",
+	2366:  "qip-login",
+	2367:  "service-ctrl",
+	2368:  "opentable",
+	2370:  "l3-hbmon",
+	2371:  "hp-rda",
+	2372:  "lanmessenger",
+	2373:  "remographlm",
+	2374:  "hydra",
+	2375:  "docker",
+	2376:  "docker-s",
+	2377:  "swarm",
+	2379:  "etcd-client",
+	2380:  "etcd-server",
+	2381:  "compaq-https",
+	2382:  "ms-olap3",
+	2383:  "ms-olap4",
+	2384:  "sd-request",
+	2385:  "sd-data",
+	2386:  "virtualtape",
+	2387:  "vsamredirector",
+	2388:  "mynahautostart",
+	2389:  "ovsessionmgr",
+	2390:  "rsmtp",
+	2391:  "3com-net-mgmt",
+	2392:  "tacticalauth",
+	2393:  "ms-olap1",
+	2394:  "ms-olap2",
+	2395:  "lan900-remote",
+	2396:  "wusage",
+	2397:  "ncl",
+	2398:  "orbiter",
+	2399:  "fmpro-fdal",
+	2400:  "opequus-server",
+	2401:  "cvspserver",
+	2402:  "taskmaster2000",
+	2403:  "taskmaster2000",
+	2404:  "iec-104",
+	2405:  "trc-netpoll",
+	2406:  "jediserver",
+	2407:  "orion",
+	2408:  "railgun-webaccl",
+	2409:  "sns-protocol",
+	2410:  "vrts-registry",
+	2411:  "netwave-ap-mgmt",
+	2412:  "cdn",
+	2413:  "orion-rmi-reg",
+	2414:  "beeyond",
+	2415:  "codima-rtp",
+	2416:  "rmtserver",
+	2417:  "composit-server",
+	2418:  "cas",
+	2419:  "attachmate-s2s",
+	2420:  "dslremote-mgmt",
+	2421:  "g-talk",
+	2422:  "crmsbits",
+	2423:  "rnrp",
+	2424:  "kofax-svr",
+	2425:  "fjitsuappmgr",
+	2426:  "vcmp",
+	2427:  "mgcp-gateway",
+	2428:  "ott",
+	2429:  "ft-role",
+	2430:  "venus",
+	2431:  "venus-se",
+	2432:  "codasrv",
+	2433:  "codasrv-se",
+	2434:  "pxc-epmap",
+	2435:  "optilogic",
+	2436:  "topx",
+	2437:  "unicontrol",
+	2438:  "msp",
+	2439:  "sybasedbsynch",
+	2440:  "spearway",
+	2441:  "pvsw-inet",
+	2442:  "netangel",
+	2443:  "powerclientcsf",
+	2444:  "btpp2sectrans",
+	2445:  "dtn1",
+	2446:  "bues-service",
+	2447:  "ovwdb",
+	2448:  "hpppssvr",
+	2449:  "ratl",
+	2450:  "netadmin",
+	2451:  "netchat",
+	2452:  "snifferclient",
+	2453:  "madge-ltd",
+	2454:  "indx-dds",
+	2455:  "wago-io-system",
+	2456:  "altav-remmgt",
+	2457:  "rapido-ip",
+	2458:  "griffin",
+	2459:  "community",
+	2460:  "ms-theater",
+	2461:  "qadmifoper",
+	2462:  "qadmifevent",
+	2463:  "lsi-raid-mgmt",
+	2464:  "direcpc-si",
+	2465:  "lbm",
+	2466:  "lbf",
+	2467:  "high-criteria",
+	2468:  "qip-msgd",
+	2469:  "mti-tcs-comm",
+	2470:  "taskman-port",
+	2471:  "seaodbc",
+	2472:  "c3",
+	2473:  "aker-cdp",
+	2474:  "vitalanalysis",
+	2475:  "ace-server",
+	2476:  "ace-svr-prop",
+	2477:  "ssm-cvs",
+	2478:  "ssm-cssps",
+	2479:  "ssm-els",
+	2480:  "powerexchange",
+	2481:  "giop",
+	2482:  "giop-ssl",
+	2483:  "ttc",
+	2484:  "ttc-ssl",
+	2485:  "netobjects1",
+	2486:  "netobjects2",
+	2487:  "pns",
+	2488:  "moy-corp",
+	2489:  "tsilb",
+	2490:  "qip-qdhcp",
+	2491:  "conclave-cpp",
+	2492:  "groove",
+	2493:  "talarian-mqs",
+	2494:  "bmc-ar",
+	2495:  "fast-rem-serv",
+	2496:  "dirgis",
+	2497:  "quaddb",
+	2498:  "odn-castraq",
+	2499:  "unicontrol",
+	2500:  "rtsserv",
+	2501:  "rtsclient",
+	2502:  "kentrox-prot",
+	2503:  "nms-dpnss",
+	2504:  "wlbs",
+	2505:  "ppcontrol",
+	2506:  "jbroker",
+	2507:  "spock",
+	2508:  "jdatastore",
+	2509:  "fjmpss",
+	2510:  "fjappmgrbulk",
+	2511:  "metastorm",
+	2512:  "citrixima",
+	2513:  "citrixadmin",
+	2514:  "facsys-ntp",
+	2515:  "facsys-router",
+	2516:  "maincontrol",
+	2517:  "call-sig-trans",
+	2518:  "willy",
+	2519:  "globmsgsvc",
+	2520:  "pvsw",
+	2521:  "adaptecmgr",
+	2522:  "windb",
+	2523:  "qke-llc-v3",
+	2524:  "optiwave-lm",
+	2525:  "ms-v-worlds",
+	2526:  "ema-sent-lm",
+	2527:  "iqserver",
+	2528:  "ncr-ccl",
+	2529:  "utsftp",
+	2530:  "vrcommerce",
+	2531:  "ito-e-gui",
+	2532:  "ovtopmd",
+	2533:  "snifferserver",
+	2534:  "combox-web-acc",
+	2535:  "madcap",
+	2536:  "btpp2audctr1",
+	2537:  "upgrade",
+	2538:  "vnwk-prapi",
+	2539:  "vsiadmin",
+	2540:  "lonworks",
+	2541:  "lonworks2",
+	2542:  "udrawgraph",
+	2543:  "reftek",
+	2544:  "novell-zen",
+	2545:  "sis-emt",
+	2546:  "vytalvaultbrtp",
+	2547:  "vytalvaultvsmp",
+	2548:  "vytalvaultpipe",
+	2549:  "ipass",
+	2550:  "ads",
+	2551:  "isg-uda-server",
+	2552:  "call-logging",
+	2553:  "efidiningport",
+	2554:  "vcnet-link-v10",
+	2555:  "compaq-wcp",
+	2556:  "nicetec-nmsvc",
+	2557:  "nicetec-mgmt",
+	2558:  "pclemultimedia",
+	2559:  "lstp",
+	2560:  "labrat",
+	2561:  "mosaixcc",
+	2562:  "delibo",
+	2563:  "cti-redwood",
+	2564:  "hp-3000-telnet",
+	2565:  "coord-svr",
+	2566:  "pcs-pcw",
+	2567:  "clp",
+	2568:  "spamtrap",
+	2569:  "sonuscallsig",
+	2570:  "hs-port",
+	2571:  "cecsvc",
+	2572:  "ibp",
+	2573:  "trustestablish",
+	2574:  "blockade-bpsp",
+	2575:  "hl7",
+	2576:  "tclprodebugger",
+	2577:  "scipticslsrvr",
+	2578:  "rvs-isdn-dcp",
+	2579:  "mpfoncl",
+	2580:  "tributary",
+	2581:  "argis-te",
+	2582:  "argis-ds",
+	2583:  "mon",
+	2584:  "cyaserv",
+	2585:  "netx-server",
+	2586:  "netx-agent",
+	2587:  "masc",
+	2588:  "privilege",
+	2589:  "quartus-tcl",
+	2590:  "idotdist",
+	2591:  "maytagshuffle",
+	2592:  "netrek",
+	2593:  "mns-mail",
+	2594:  "dts",
+	2595:  "worldfusion1",
+	2596:  "worldfusion2",
+	2597:  "homesteadglory",
+	2598:  "citriximaclient",
+	2599:  "snapd",
+	2600:  "hpstgmgr",
+	2601:  "discp-client",
+	2602:  "discp-server",
+	2603:  "servicemeter",
+	2604:  "nsc-ccs",
+	2605:  "nsc-posa",
+	2606:  "netmon",
+	2607:  "connection",
+	2608:  "wag-service",
+	2609:  "system-monitor",
+	2610:  "versa-tek",
+	2611:  "lionhead",
+	2612:  "qpasa-agent",
+	2613:  "smntubootstrap",
+	2614:  "neveroffline",
+	2615:  "firepower",
+	2616:  "appswitch-emp",
+	2617:  "cmadmin",
+	2618:  "priority-e-com",
+	2619:  "bruce",
+	2620:  "lpsrecommender",
+	2621:  "miles-apart",
+	2622:  "metricadbc",
+	2623:  "lmdp",
+	2624:  "aria",
+	2625:  "blwnkl-port",
+	2626:  "gbjd816",
+	2627:  "moshebeeri",
+	2628:  "dict",
+	2629:  "sitaraserver",
+	2630:  "sitaramgmt",
+	2631:  "sitaradir",
+	2632:  "irdg-post",
+	2633:  "interintelli",
+	2634:  "pk-electronics",
+	2635:  "backburner",
+	2636:  "solve",
+	2637:  "imdocsvc",
+	2638:  "sybaseanywhere",
+	2639:  "aminet",
+	2640:  "ami-control",
+	2641:  "hdl-srv",
+	2642:  "tragic",
+	2643:  "gte-samp",
+	2644:  "travsoft-ipx-t",
+	2645:  "novell-ipx-cmd",
+	2646:  "and-lm",
+	2647:  "syncserver",
+	2648:  "upsnotifyprot",
+	2649:  "vpsipport",
+	2650:  "eristwoguns",
+	2651:  "ebinsite",
+	2652:  "interpathpanel",
+	2653:  "sonus",
+	2654:  "corel-vncadmin",
+	2655:  "unglue",
+	2656:  "kana",
+	2657:  "sns-dispatcher",
+	2658:  "sns-admin",
+	2659:  "sns-query",
+	2660:  "gcmonitor",
+	2661:  "olhost",
+	2662:  "bintec-capi",
+	2663:  "bintec-tapi",
+	2664:  "patrol-mq-gm",
+	2665:  "patrol-mq-nm",
+	2666:  "extensis",
+	2667:  "alarm-clock-s",
+	2668:  "alarm-clock-c",
+	2669:  "toad",
+	2670:  "tve-announce",
+	2671:  "newlixreg",
+	2672:  "nhserver",
+	2673:  "firstcall42",
+	2674:  "ewnn",
+	2675:  "ttc-etap",
+	2676:  "simslink",
+	2677:  "gadgetgate1way",
+	2678:  "gadgetgate2way",
+	2679:  "syncserverssl",
+	2680:  "pxc-sapxom",
+	2681:  "mpnjsomb",
+	2683:  "ncdloadbalance",
+	2684:  "mpnjsosv",
+	2685:  "mpnjsocl",
+	2686:  "mpnjsomg",
+	2687:  "pq-lic-mgmt",
+	2688:  "md-cg-http",
+	2689:  "fastlynx",
+	2690:  "hp-nnm-data",
+	2691:  "itinternet",
+	2692:  "admins-lms",
+	2694:  "pwrsevent",
+	2695:  "vspread",
+	2696:  "unifyadmin",
+	2697:  "oce-snmp-trap",
+	2698:  "mck-ivpip",
+	2699:  "csoft-plusclnt",
+	2700:  "tqdata",
+	2701:  "sms-rcinfo",
+	2702:  "sms-xfer",
+	2703:  "sms-chat",
+	2704:  "sms-remctrl",
+	2705:  "sds-admin",
+	2706:  "ncdmirroring",
+	2707:  "emcsymapiport",
+	2708:  "banyan-net",
+	2709:  "supermon",
+	2710:  "sso-service",
+	2711:  "sso-control",
+	2712:  "aocp",
+	2713:  "raventbs",
+	2714:  "raventdm",
+	2715:  "hpstgmgr2",
+	2716:  "inova-ip-disco",
+	2717:  "pn-requester",
+	2718:  "pn-requester2",
+	2719:  "scan-change",
+	2720:  "wkars",
+	2721:  "smart-diagnose",
+	2722:  "proactivesrvr",
+	2723:  "watchdog-nt",
+	2724:  "qotps",
+	2725:  "msolap-ptp2",
+	2726:  "tams",
+	2727:  "mgcp-callagent",
+	2728:  "sqdr",
+	2729:  "tcim-control",
+	2730:  "nec-raidplus",
+	2731:  "fyre-messanger",
+	2732:  "g5m",
+	2733:  "signet-ctf",
+	2734:  "ccs-software",
+	2735:  "netiq-mc",
+	2736:  "radwiz-nms-srv",
+	2737:  "srp-feedback",
+	2738:  "ndl-tcp-ois-gw",
+	2739:  "tn-timing",
+	2740:  "alarm",
+	2741:  "tsb",
+	2742:  "tsb2",
+	2743:  "murx",
+	2744:  "honyaku",
+	2745:  "urbisnet",
+	2746:  "cpudpencap",
+	2747:  "fjippol-swrly",
+	2748:  "fjippol-polsvr",
+	2749:  "fjippol-cnsl",
+	2750:  "fjippol-port1",
+	2751:  "fjippol-port2",
+	2752:  "rsisysaccess",
+	2753:  "de-spot",
+	2754:  "apollo-cc",
+	2755:  "expresspay",
+	2756:  "simplement-tie",
+	2757:  "cnrp",
+	2758:  "apollo-status",
+	2759:  "apollo-gms",
+	2760:  "sabams",
+	2761:  "dicom-iscl",
+	2762:  "dicom-tls",
+	2763:  "desktop-dna",
+	2764:  "data-insurance",
+	2765:  "qip-audup",
+	2766:  "compaq-scp",
+	2767:  "uadtc",
+	2768:  "uacs",
+	2769:  "exce",
+	2770:  "veronica",
+	2771:  "vergencecm",
+	2772:  "auris",
+	2773:  "rbakcup1",
+	2774:  "rbakcup2",
+	2775:  "smpp",
+	2776:  "ridgeway1",
+	2777:  "ridgeway2",
+	2778:  "gwen-sonya",
+	2779:  "lbc-sync",
+	2780:  "lbc-control",
+	2781:  "whosells",
+	2782:  "everydayrc",
+	2783:  "aises",
+	2784:  "www-dev",
+	2785:  "aic-np",
+	2786:  "aic-oncrpc",
+	2787:  "piccolo",
+	2788:  "fryeserv",
+	2789:  "media-agent",
+	2790:  "plgproxy",
+	2791:  "mtport-regist",
+	2792:  "f5-globalsite",
+	2793:  "initlsmsad",
+	2795:  "livestats",
+	2796:  "ac-tech",
+	2797:  "esp-encap",
+	2798:  "tmesis-upshot",
+	2799:  "icon-discover",
+	2800:  "acc-raid",
+	2801:  "igcp",
+	2802:  "veritas-tcp1",
+	2803:  "btprjctrl",
+	2804:  "dvr-esm",
+	2805:  "wta-wsp-s",
+	2806:  "cspuni",
+	2807:  "cspmulti",
+	2808:  "j-lan-p",
+	2809:  "corbaloc",
+	2810:  "netsteward",
+	2811:  "gsiftp",
+	2812:  "atmtcp",
+	2813:  "llm-pass",
+	2814:  "llm-csv",
+	2815:  "lbc-measure",
+	2816:  "lbc-watchdog",
+	2817:  "nmsigport",
+	2818:  "rmlnk",
+	2819:  "fc-faultnotify",
+	2820:  "univision",
+	2821:  "vrts-at-port",
+	2822:  "ka0wuc",
+	2823:  "cqg-netlan",
+	2824:  "cqg-netlan-1",
+	2826:  "slc-systemlog",
+	2827:  "slc-ctrlrloops",
+	2828:  "itm-lm",
+	2829:  "silkp1",
+	2830:  "silkp2",
+	2831:  "silkp3",
+	2832:  "silkp4",
+	2833:  "glishd",
+	2834:  "evtp",
+	2835:  "evtp-data",
+	2836:  "catalyst",
+	2837:  "repliweb",
+	2838:  "starbot",
+	2839:  "nmsigport",
+	2840:  "l3-exprt",
+	2841:  "l3-ranger",
+	2842:  "l3-hawk",
+	2843:  "pdnet",
+	2844:  "bpcp-poll",
+	2845:  "bpcp-trap",
+	2846:  "aimpp-hello",
+	2847:  "aimpp-port-req",
+	2848:  "amt-blc-port",
+	2849:  "fxp",
+	2850:  "metaconsole",
+	2851:  "webemshttp",
+	2852:  "bears-01",
+	2853:  "ispipes",
+	2854:  "infomover",
+	2855:  "msrp",
+	2856:  "cesdinv",
+	2857:  "simctlp",
+	2858:  "ecnp",
+	2859:  "activememory",
+	2860:  "dialpad-voice1",
+	2861:  "dialpad-voice2",
+	2862:  "ttg-protocol",
+	2863:  "sonardata",
+	2864:  "astromed-main",
+	2865:  "pit-vpn",
+	2866:  "iwlistener",
+	2867:  "esps-portal",
+	2868:  "npep-messaging",
+	2869:  "icslap",
+	2870:  "daishi",
+	2871:  "msi-selectplay",
+	2872:  "radix",
+	2874:  "dxmessagebase1",
+	2875:  "dxmessagebase2",
+	2876:  "sps-tunnel",
+	2877:  "bluelance",
+	2878:  "aap",
+	2879:  "ucentric-ds",
+	2880:  "synapse",
+	2881:  "ndsp",
+	2882:  "ndtp",
+	2883:  "ndnp",
+	2884:  "flashmsg",
+	2885:  "topflow",
+	2886:  "responselogic",
+	2887:  "aironetddp",
+	2888:  "spcsdlobby",
+	2889:  "rsom",
+	2890:  "cspclmulti",
+	2891:  "cinegrfx-elmd",
+	2892:  "snifferdata",
+	2893:  "vseconnector",
+	2894:  "abacus-remote",
+	2895:  "natuslink",
+	2896:  "ecovisiong6-1",
+	2897:  "citrix-rtmp",
+	2898:  "appliance-cfg",
+	2899:  "powergemplus",
+	2900:  "quicksuite",
+	2901:  "allstorcns",
+	2902:  "netaspi",
+	2903:  "suitcase",
+	2904:  "m2ua",
+	2905:  "m3ua",
+	2906:  "caller9",
+	2907:  "webmethods-b2b",
+	2908:  "mao",
+	2909:  "funk-dialout",
+	2910:  "tdaccess",
+	2911:  "blockade",
+	2912:  "epicon",
+	2913:  "boosterware",
+	2914:  "gamelobby",
+	2915:  "tksocket",
+	2916:  "elvin-server",
+	2917:  "elvin-client",
+	2918:  "kastenchasepad",
+	2919:  "roboer",
+	2920:  "roboeda",
+	2921:  "cesdcdman",
+	2922:  "cesdcdtrn",
+	2923:  "wta-wsp-wtp-s",
+	2924:  "precise-vip",
+	2926:  "mobile-file-dl",
+	2927:  "unimobilectrl",
+	2928:  "redstone-cpss",
+	2929:  "amx-webadmin",
+	2930:  "amx-weblinx",
+	2931:  "circle-x",
+	2932:  "incp",
+	2933:  "4-tieropmgw",
+	2934:  "4-tieropmcli",
+	2935:  "qtp",
+	2936:  "otpatch",
+	2937:  "pnaconsult-lm",
+	2938:  "sm-pas-1",
+	2939:  "sm-pas-2",
+	2940:  "sm-pas-3",
+	2941:  "sm-pas-4",
+	2942:  "sm-pas-5",
+	2943:  "ttnrepository",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	2946:  "fjsvmpor",
+	2947:  "gpsd",
+	2948:  "wap-push",
+	2949:  "wap-pushsecure",
+	2950:  "esip",
+	2951:  "ottp",
+	2952:  "mpfwsas",
+	2953:  "ovalarmsrv",
+	2954:  "ovalarmsrv-cmd",
+	2955:  "csnotify",
+	2956:  "ovrimosdbman",
+	2957:  "jmact5",
+	2958:  "jmact6",
+	2959:  "rmopagt",
+	2960:  "dfoxserver",
+	2961:  "boldsoft-lm",
+	2962:  "iph-policy-cli",
+	2963:  "iph-policy-adm",
+	2964:  "bullant-srap",
+	2965:  "bullant-rap",
+	2966:  "idp-infotrieve",
+	2967:  "ssc-agent",
+	2968:  "enpp",
+	2969:  "essp",
+	2970:  "index-net",
+	2971:  "netclip",
+	2972:  "pmsm-webrctl",
+	2973:  "svnetworks",
+	2974:  "signal",
+	2975:  "fjmpcm",
+	2976:  "cns-srv-port",
+	2977:  "ttc-etap-ns",
+	2978:  "ttc-etap-ds",
+	2979:  "h263-video",
+	2980:  "wimd",
+	2981:  "mylxamport",
+	2982:  "iwb-whiteboard",
+	2983:  "netplan",
+	2984:  "hpidsadmin",
+	2985:  "hpidsagent",
+	2986:  "stonefalls",
+	2987:  "identify",
+	2988:  "hippad",
+	2989:  "zarkov",
+	2990:  "boscap",
+	2991:  "wkstn-mon",
+	2992:  "avenyo",
+	2993:  "veritas-vis1",
+	2994:  "veritas-vis2",
+	2995:  "idrs",
+	2996:  "vsixml",
+	2997:  "rebol",
+	2998:  "realsecure",
+	2999:  "remoteware-un",
+	3000:  "hbci",
+	3001:  "origo-native",
+	3002:  "exlm-agent",
+	3003:  "cgms",
+	3004:  "csoftragent",
+	3005:  "geniuslm",
+	3006:  "ii-admin",
+	3007:  "lotusmtap",
+	3008:  "midnight-tech",
+	3009:  "pxc-ntfy",
+	3010:  "gw",
+	3011:  "trusted-web",
+	3012:  "twsdss",
+	3013:  "gilatskysurfer",
+	3014:  "broker-service",
+	3015:  "nati-dstp",
+	3016:  "notify-srvr",
+	3017:  "event-listener",
+	3018:  "srvc-registry",
+	3019:  "resource-mgr",
+	3020:  "cifs",
+	3021:  "agriserver",
+	3022:  "csregagent",
+	3023:  "magicnotes",
+	3024:  "nds-sso",
+	3025:  "arepa-raft",
+	3026:  "agri-gateway",
+	3027:  "LiebDevMgmt-C",
+	3028:  "LiebDevMgmt-DM",
+	3029:  "LiebDevMgmt-A",
+	3030:  "arepa-cas",
+	3031:  "eppc",
+	3032:  "redwood-chat",
+	3033:  "pdb",
+	3034:  "osmosis-aeea",
+	3035:  "fjsv-gssagt",
+	3036:  "hagel-dump",
+	3037:  "hp-san-mgmt",
+	3038:  "santak-ups",
+	3039:  "cogitate",
+	3040:  "tomato-springs",
+	3041:  "di-traceware",
+	3042:  "journee",
+	3043:  "brp",
+	3044:  "epp",
+	3045:  "responsenet",
+	3046:  "di-ase",
+	3047:  "hlserver",
+	3048:  "pctrader",
+	3049:  "nsws",
+	3050:  "gds-db",
+	3051:  "galaxy-server",
+	3052:  "apc-3052",
+	3053:  "dsom-server",
+	3054:  "amt-cnf-prot",
+	3055:  "policyserver",
+	3056:  "cdl-server",
+	3057:  "goahead-fldup",
+	3058:  "videobeans",
+	3059:  "qsoft",
+	3060:  "interserver",
+	3061:  "cautcpd",
+	3062:  "ncacn-ip-tcp",
+	3063:  "ncadg-ip-udp",
+	3064:  "rprt",
+	3065:  "slinterbase",
+	3066:  "netattachsdmp",
+	3067:  "fjhpjp",
+	3068:  "ls3bcast",
+	3069:  "ls3",
+	3070:  "mgxswitch",
+	3071:  "xplat-replicate",
+	3072:  "csd-monitor",
+	3073:  "vcrp",
+	3074:  "xbox",
+	3075:  "orbix-locator",
+	3076:  "orbix-config",
+	3077:  "orbix-loc-ssl",
+	3078:  "orbix-cfg-ssl",
+	3079:  "lv-frontpanel",
+	3080:  "stm-pproc",
+	3081:  "tl1-lv",
+	3082:  "tl1-raw",
+	3083:  "tl1-telnet",
+	3084:  "itm-mccs",
+	3085:  "pcihreq",
+	3086:  "jdl-dbkitchen",
+	3087:  "asoki-sma",
+	3088:  "xdtp",
+	3089:  "ptk-alink",
+	3090:  "stss",
+	3091:  "1ci-smcs",
+	3093:  "rapidmq-center",
+	3094:  "rapidmq-reg",
+	3095:  "panasas",
+	3096:  "ndl-aps",
+	3098:  "umm-port",
+	3099:  "chmd",
+	3100:  "opcon-xps",
+	3101:  "hp-pxpib",
+	3102:  "slslavemon",
+	3103:  "autocuesmi",
+	3104:  "autocuelog",
+	3105:  "cardbox",
+	3106:  "cardbox-http",
+	3107:  "business",
+	3108:  "geolocate",
+	3109:  "personnel",
+	3110:  "sim-control",
+	3111:  "wsynch",
+	3112:  "ksysguard",
+	3113:  "cs-auth-svr",
+	3114:  "ccmad",
+	3115:  "mctet-master",
+	3116:  "mctet-gateway",
+	3117:  "mctet-jserv",
+	3118:  "pkagent",
+	3119:  "d2000kernel",
+	3120:  "d2000webserver",
+	3121:  "pcmk-remote",
+	3122:  "vtr-emulator",
+	3123:  "edix",
+	3124:  "beacon-port",
+	3125:  "a13-an",
+	3127:  "ctx-bridge",
+	3128:  "ndl-aas",
+	3129:  "netport-id",
+	3130:  "icpv2",
+	3131:  "netbookmark",
+	3132:  "ms-rule-engine",
+	3133:  "prism-deploy",
+	3134:  "ecp",
+	3135:  "peerbook-port",
+	3136:  "grubd",
+	3137:  "rtnt-1",
+	3138:  "rtnt-2",
+	3139:  "incognitorv",
+	3140:  "ariliamulti",
+	3141:  "vmodem",
+	3142:  "rdc-wh-eos",
+	3143:  "seaview",
+	3144:  "tarantella",
+	3145:  "csi-lfap",
+	3146:  "bears-02",
+	3147:  "rfio",
+	3148:  "nm-game-admin",
+	3149:  "nm-game-server",
+	3150:  "nm-asses-admin",
+	3151:  "nm-assessor",
+	3152:  "feitianrockey",
+	3153:  "s8-client-port",
+	3154:  "ccmrmi",
+	3155:  "jpegmpeg",
+	3156:  "indura",
+	3157:  "e3consultants",
+	3158:  "stvp",
+	3159:  "navegaweb-port",
+	3160:  "tip-app-server",
+	3161:  "doc1lm",
+	3162:  "sflm",
+	3163:  "res-sap",
+	3164:  "imprs",
+	3165:  "newgenpay",
+	3166:  "sossecollector",
+	3167:  "nowcontact",
+	3168:  "poweronnud",
+	3169:  "serverview-as",
+	3170:  "serverview-asn",
+	3171:  "serverview-gf",
+	3172:  "serverview-rm",
+	3173:  "serverview-icc",
+	3174:  "armi-server",
+	3175:  "t1-e1-over-ip",
+	3176:  "ars-master",
+	3177:  "phonex-port",
+	3178:  "radclientport",
+	3179:  "h2gf-w-2m",
+	3180:  "mc-brk-srv",
+	3181:  "bmcpatrolagent",
+	3182:  "bmcpatrolrnvu",
+	3183:  "cops-tls",
+	3184:  "apogeex-port",
+	3185:  "smpppd",
+	3186:  "iiw-port",
+	3187:  "odi-port",
+	3188:  "brcm-comm-port",
+	3189:  "pcle-infex",
+	3190:  "csvr-proxy",
+	3191:  "csvr-sslproxy",
+	3192:  "firemonrcc",
+	3193:  "spandataport",
+	3194:  "magbind",
+	3195:  "ncu-1",
+	3196:  "ncu-2",
+	3197:  "embrace-dp-s",
+	3198:  "embrace-dp-c",
+	3199:  "dmod-workspace",
+	3200:  "tick-port",
+	3201:  "cpq-tasksmart",
+	3202:  "intraintra",
+	3203:  "netwatcher-mon",
+	3204:  "netwatcher-db",
+	3205:  "isns",
+	3206:  "ironmail",
+	3207:  "vx-auth-port",
+	3208:  "pfu-prcallback",
+	3209:  "netwkpathengine",
+	3210:  "flamenco-proxy",
+	3211:  "avsecuremgmt",
+	3212:  "surveyinst",
+	3213:  "neon24x7",
+	3214:  "jmq-daemon-1",
+	3215:  "jmq-daemon-2",
+	3216:  "ferrari-foam",
+	3217:  "unite",
+	3218:  "smartpackets",
+	3219:  "wms-messenger",
+	3220:  "xnm-ssl",
+	3221:  "xnm-clear-text",
+	3222:  "glbp",
+	3223:  "digivote",
+	3224:  "aes-discovery",
+	3225:  "fcip-port",
+	3226:  "isi-irp",
+	3227:  "dwnmshttp",
+	3228:  "dwmsgserver",
+	3229:  "global-cd-port",
+	3230:  "sftdst-port",
+	3231:  "vidigo",
+	3232:  "mdtp",
+	3233:  "whisker",
+	3234:  "alchemy",
+	3235:  "mdap-port",
+	3236:  "apparenet-ts",
+	3237:  "apparenet-tps",
+	3238:  "apparenet-as",
+	3239:  "apparenet-ui",
+	3240:  "triomotion",
+	3241:  "sysorb",
+	3242:  "sdp-id-port",
+	3243:  "timelot",
+	3244:  "onesaf",
+	3245:  "vieo-fe",
+	3246:  "dvt-system",
+	3247:  "dvt-data",
+	3248:  "procos-lm",
+	3249:  "ssp",
+	3250:  "hicp",
+	3251:  "sysscanner",
+	3252:  "dhe",
+	3253:  "pda-data",
+	3254:  "pda-sys",
+	3255:  "semaphore",
+	3256:  "cpqrpm-agent",
+	3257:  "cpqrpm-server",
+	3258:  "ivecon-port",
+	3259:  "epncdp2",
+	3260:  "iscsi-target",
+	3261:  "winshadow",
+	3262:  "necp",
+	3263:  "ecolor-imager",
+	3264:  "ccmail",
+	3265:  "altav-tunnel",
+	3266:  "ns-cfg-server",
+	3267:  "ibm-dial-out",
+	3268:  "msft-gc",
+	3269:  "msft-gc-ssl",
+	3270:  "verismart",
+	3271:  "csoft-prev",
+	3272:  "user-manager",
+	3273:  "sxmp",
+	3274:  "ordinox-server",
+	3275:  "samd",
+	3276:  "maxim-asics",
+	3277:  "awg-proxy",
+	3278:  "lkcmserver",
+	3279:  "admind",
+	3280:  "vs-server",
+	3281:  "sysopt",
+	3282:  "datusorb",
+	3283:  "Apple Remote Desktop (Net Assistant)",
+	3284:  "4talk",
+	3285:  "plato",
+	3286:  "e-net",
+	3287:  "directvdata",
+	3288:  "cops",
+	3289:  "enpc",
+	3290:  "caps-lm",
+	3291:  "sah-lm",
+	3292:  "cart-o-rama",
+	3293:  "fg-fps",
+	3294:  "fg-gip",
+	3295:  "dyniplookup",
+	3296:  "rib-slm",
+	3297:  "cytel-lm",
+	3298:  "deskview",
+	3299:  "pdrncs",
+	3300:  "ceph",
+	3302:  "mcs-fastmail",
+	3303:  "opsession-clnt",
+	3304:  "opsession-srvr",
+	3305:  "odette-ftp",
+	3306:  "mysql",
+	3307:  "opsession-prxy",
+	3308:  "tns-server",
+	3309:  "tns-adv",
+	3310:  "dyna-access",
+	3311:  "mcns-tel-ret",
+	3312:  "appman-server",
+	3313:  "uorb",
+	3314:  "uohost",
+	3315:  "cdid",
+	3316:  "aicc-cmi",
+	3317:  "vsaiport",
+	3318:  "ssrip",
+	3319:  "sdt-lmd",
+	3320:  "officelink2000",
+	3321:  "vnsstr",
+	3326:  "sftu",
+	3327:  "bbars",
+	3328:  "egptlm",
+	3329:  "hp-device-disc",
+	3330:  "mcs-calypsoicf",
+	3331:  "mcs-messaging",
+	3332:  "mcs-mailsvr",
+	3333:  "dec-notes",
+	3334:  "directv-web",
+	3335:  "directv-soft",
+	3336:  "directv-tick",
+	3337:  "directv-catlg",
+	3338:  "anet-b",
+	3339:  "anet-l",
+	3340:  "anet-m",
+	3341:  "anet-h",
+	3342:  "webtie",
+	3343:  "ms-cluster-net",
+	3344:  "bnt-manager",
+	3345:  "influence",
+	3346:  "trnsprntproxy",
+	3347:  "phoenix-rpc",
+	3348:  "pangolin-laser",
+	3349:  "chevinservices",
+	3350:  "findviatv",
+	3351:  "btrieve",
+	3352:  "ssql",
+	3353:  "fatpipe",
+	3354:  "suitjd",
+	3355:  "ordinox-dbase",
+	3356:  "upnotifyps",
+	3357:  "adtech-test",
+	3358:  "mpsysrmsvr",
+	3359:  "wg-netforce",
+	3360:  "kv-server",
+	3361:  "kv-agent",
+	3362:  "dj-ilm",
+	3363:  "nati-vi-server",
+	3364:  "creativeserver",
+	3365:  "contentserver",
+	3366:  "creativepartnr",
+	3372:  "tip2",
+	3373:  "lavenir-lm",
+	3374:  "cluster-disc",
+	3375:  "vsnm-agent",
+	3376:  "cdbroker",
+	3377:  "cogsys-lm",
+	3378:  "wsicopy",
+	3379:  "socorfs",
+	3380:  "sns-channels",
+	3381:  "geneous",
+	3382:  "fujitsu-neat",
+	3383:  "esp-lm",
+	3384:  "hp-clic",
+	3385:  "qnxnetman",
+	3386:  "gprs-data",
+	3387:  "backroomnet",
+	3388:  "cbserver",
+	3389:  "ms-wbt-server",
+	3390:  "dsc",
+	3391:  "savant",
+	3392:  "efi-lm",
+	3393:  "d2k-tapestry1",
+	3394:  "d2k-tapestry2",
+	3395:  "dyna-lm",
+	3396:  "printer-agent",
+	3397:  "cloanto-lm",
+	3398:  "mercantile",
+	3399:  "csms",
+	3400:  "csms2",
+	3401:  "filecast",
+	3402:  "fxaengine-net",
+	3405:  "nokia-ann-ch1",
+	3406:  "nokia-ann-ch2",
+	3407:  "ldap-admin",
+	3408:  "BESApi",
+	3409:  "networklens",
+	3410:  "networklenss",
+	3411:  "biolink-auth",
+	3412:  "xmlblaster",
+	3413:  "svnet",
+	3414:  "wip-port",
+	3415:  "bcinameservice",
+	3416:  "commandport",
+	3417:  "csvr",
+	3418:  "rnmap",
+	3419:  "softaudit",
+	3420:  "ifcp-port",
+	3421:  "bmap",
+	3422:  "rusb-sys-port",
+	3423:  "xtrm",
+	3424:  "xtrms",
+	3425:  "agps-port",
+	3426:  "arkivio",
+	3427:  "websphere-snmp",
+	3428:  "twcss",
+	3429:  "gcsp",
+	3430:  "ssdispatch",
+	3431:  "ndl-als",
+	3432:  "osdcp",
+	3433:  "opnet-smp",
+	3434:  "opencm",
+	3435:  "pacom",
+	3436:  "gc-config",
+	3437:  "autocueds",
+	3438:  "spiral-admin",
+	3439:  "hri-port",
+	3440:  "ans-console",
+	3441:  "connect-client",
+	3442:  "connect-server",
+	3443:  "ov-nnm-websrv",
+	3444:  "denali-server",
+	3445:  "monp",
+	3446:  "3comfaxrpc",
+	3447:  "directnet",
+	3448:  "dnc-port",
+	3449:  "hotu-chat",
+	3450:  "castorproxy",
+	3451:  "asam",
+	3452:  "sabp-signal",
+	3453:  "pscupd",
+	3454:  "mira",
+	3455:  "prsvp",
+	3456:  "vat",
+	3457:  "vat-control",
+	3458:  "d3winosfi",
+	3459:  "integral",
+	3460:  "edm-manager",
+	3461:  "edm-stager",
+	3462:  "edm-std-notify",
+	3463:  "edm-adm-notify",
+	3464:  "edm-mgr-sync",
+	3465:  "edm-mgr-cntrl",
+	3466:  "workflow",
+	3467:  "rcst",
+	3468:  "ttcmremotectrl",
+	3469:  "pluribus",
+	3470:  "jt400",
+	3471:  "jt400-ssl",
+	3472:  "jaugsremotec-1",
+	3473:  "jaugsremotec-2",
+	3474:  "ttntspauto",
+	3475:  "genisar-port",
+	3476:  "nppmp",
+	3477:  "ecomm",
+	3478:  "stun",
+	3479:  "twrpc",
+	3480:  "plethora",
+	3481:  "cleanerliverc",
+	3482:  "vulture",
+	3483:  "slim-devices",
+	3484:  "gbs-stp",
+	3485:  "celatalk",
+	3486:  "ifsf-hb-port",
+	3487:  "ltctcp",
+	3488:  "fs-rh-srv",
+	3489:  "dtp-dia",
+	3490:  "colubris",
+	3491:  "swr-port",
+	3492:  "tvdumtray-port",
+	3493:  "nut",
+	3494:  "ibm3494",
+	3495:  "seclayer-tcp",
+	3496:  "seclayer-tls",
+	3497:  "ipether232port",
+	3498:  "dashpas-port",
+	3499:  "sccip-media",
+	3500:  "rtmp-port",
+	3501:  "isoft-p2p",
+	3502:  "avinstalldisc",
+	3503:  "lsp-ping",
+	3504:  "ironstorm",
+	3505:  "ccmcomm",
+	3506:  "apc-3506",
+	3507:  "nesh-broker",
+	3508:  "interactionweb",
+	3509:  "vt-ssl",
+	3510:  "xss-port",
+	3511:  "webmail-2",
+	3512:  "aztec",
+	3513:  "arcpd",
+	3514:  "must-p2p",
+	3515:  "must-backplane",
+	3516:  "smartcard-port",
+	3517:  "802-11-iapp",
+	3518:  "artifact-msg",
+	3519:  "nvmsgd",
+	3520:  "galileolog",
+	3521:  "mc3ss",
+	3522:  "nssocketport",
+	3523:  "odeumservlink",
+	3524:  "ecmport",
+	3525:  "eisport",
+	3526:  "starquiz-port",
+	3527:  "beserver-msg-q",
+	3528:  "jboss-iiop",
+	3529:  "jboss-iiop-ssl",
+	3530:  "gf",
+	3531:  "joltid",
+	3532:  "raven-rmp",
+	3533:  "raven-rdp",
+	3534:  "urld-port",
+	3535:  "ms-la",
+	3536:  "snac",
+	3537:  "ni-visa-remote",
+	3538:  "ibm-diradm",
+	3539:  "ibm-diradm-ssl",
+	3540:  "pnrp-port",
+	3541:  "voispeed-port",
+	3542:  "hacl-monitor",
+	3543:  "qftest-lookup",
+	3544:  "teredo",
+	3545:  "camac",
+	3547:  "symantec-sim",
+	3548:  "interworld",
+	3549:  "tellumat-nms",
+	3550:  "ssmpp",
+	3551:  "apcupsd",
+	3552:  "taserver",
+	3553:  "rbr-discovery",
+	3554:  "questnotify",
+	3555:  "razor",
+	3556:  "sky-transport",
+	3557:  "personalos-001",
+	3558:  "mcp-port",
+	3559:  "cctv-port",
+	3560:  "iniserve-port",
+	3561:  "bmc-onekey",
+	3562:  "sdbproxy",
+	3563:  "watcomdebug",
+	3564:  "esimport",
+	3565:  "m2pa",
+	3566:  "quest-data-hub",
+	3567:  "dof-eps",
+	3568:  "dof-tunnel-sec",
+	3569:  "mbg-ctrl",
+	3570:  "mccwebsvr-port",
+	3571:  "megardsvr-port",
+	3572:  "megaregsvrport",
+	3573:  "tag-ups-1",
+	3574:  "dmaf-server",
+	3575:  "ccm-port",
+	3576:  "cmc-port",
+	3577:  "config-port",
+	3578:  "data-port",
+	3579:  "ttat3lb",
+	3580:  "nati-svrloc",
+	3581:  "kfxaclicensing",
+	3582:  "press",
+	3583:  "canex-watch",
+	3584:  "u-dbap",
+	3585:  "emprise-lls",
+	3586:  "emprise-lsc",
+	3587:  "p2pgroup",
+	3588:  "sentinel",
+	3589:  "isomair",
+	3590:  "wv-csp-sms",
+	3591:  "gtrack-server",
+	3592:  "gtrack-ne",
+	3593:  "bpmd",
+	3594:  "mediaspace",
+	3595:  "shareapp",
+	3596:  "iw-mmogame",
+	3597:  "a14",
+	3598:  "a15",
+	3599:  "quasar-server",
+	3600:  "trap-daemon",
+	3601:  "visinet-gui",
+	3602:  "infiniswitchcl",
+	3603:  "int-rcv-cntrl",
+	3604:  "bmc-jmx-port",
+	3605:  "comcam-io",
+	3606:  "splitlock",
+	3607:  "precise-i3",
+	3608:  "trendchip-dcp",
+	3609:  "cpdi-pidas-cm",
+	3610:  "echonet",
+	3611:  "six-degrees",
+	3612:  "hp-dataprotect",
+	3613:  "alaris-disc",
+	3614:  "sigma-port",
+	3615:  "start-network",
+	3616:  "cd3o-protocol",
+	3617:  "sharp-server",
+	3618:  "aairnet-1",
+	3619:  "aairnet-2",
+	3620:  "ep-pcp",
+	3621:  "ep-nsp",
+	3622:  "ff-lr-port",
+	3623:  "haipe-discover",
+	3624:  "dist-upgrade",
+	3625:  "volley",
+	3626:  "bvcdaemon-port",
+	3627:  "jamserverport",
+	3628:  "ept-machine",
+	3629:  "escvpnet",
+	3630:  "cs-remote-db",
+	3631:  "cs-services",
+	3632:  "distcc",
+	3633:  "wacp",
+	3634:  "hlibmgr",
+	3635:  "sdo",
+	3636:  "servistaitsm",
+	3637:  "scservp",
+	3638:  "ehp-backup",
+	3639:  "xap-ha",
+	3640:  "netplay-port1",
+	3641:  "netplay-port2",
+	3642:  "juxml-port",
+	3643:  "audiojuggler",
+	3644:  "ssowatch",
+	3645:  "cyc",
+	3646:  "xss-srv-port",
+	3647:  "splitlock-gw",
+	3648:  "fjcp",
+	3649:  "nmmp",
+	3650:  "prismiq-plugin",
+	3651:  "xrpc-registry",
+	3652:  "vxcrnbuport",
+	3653:  "tsp",
+	3654:  "vaprtm",
+	3655:  "abatemgr",
+	3656:  "abatjss",
+	3657:  "immedianet-bcn",
+	3658:  "ps-ams",
+	3659:  "apple-sasl",
+	3660:  "can-nds-ssl",
+	3661:  "can-ferret-ssl",
+	3662:  "pserver",
+	3663:  "dtp",
+	3664:  "ups-engine",
+	3665:  "ent-engine",
+	3666:  "eserver-pap",
+	3667:  "infoexch",
+	3668:  "dell-rm-port",
+	3669:  "casanswmgmt",
+	3670:  "smile",
+	3671:  "efcp",
+	3672:  "lispworks-orb",
+	3673:  "mediavault-gui",
+	3674:  "wininstall-ipc",
+	3675:  "calltrax",
+	3676:  "va-pacbase",
+	3677:  "roverlog",
+	3678:  "ipr-dglt",
+	3679:  "Escale (Newton Dock)",
+	3680:  "npds-tracker",
+	3681:  "bts-x73",
+	3682:  "cas-mapi",
+	3683:  "bmc-ea",
+	3684:  "faxstfx-port",
+	3685:  "dsx-agent",
+	3686:  "tnmpv2",
+	3687:  "simple-push",
+	3688:  "simple-push-s",
+	3689:  "daap",
+	3690:  "svn",
+	3691:  "magaya-network",
+	3692:  "intelsync",
+	3693:  "easl",
+	3695:  "bmc-data-coll",
+	3696:  "telnetcpcd",
+	3697:  "nw-license",
+	3698:  "sagectlpanel",
+	3699:  "kpn-icw",
+	3700:  "lrs-paging",
+	3701:  "netcelera",
+	3702:  "ws-discovery",
+	3703:  "adobeserver-3",
+	3704:  "adobeserver-4",
+	3705:  "adobeserver-5",
+	3706:  "rt-event",
+	3707:  "rt-event-s",
+	3708:  "sun-as-iiops",
+	3709:  "ca-idms",
+	3710:  "portgate-auth",
+	3711:  "edb-server2",
+	3712:  "sentinel-ent",
+	3713:  "tftps",
+	3714:  "delos-dms",
+	3715:  "anoto-rendezv",
+	3716:  "wv-csp-sms-cir",
+	3717:  "wv-csp-udp-cir",
+	3718:  "opus-services",
+	3719:  "itelserverport",
+	3720:  "ufastro-instr",
+	3721:  "xsync",
+	3722:  "xserveraid",
+	3723:  "sychrond",
+	3724:  "blizwow",
+	3725:  "na-er-tip",
+	3726:  "array-manager",
+	3727:  "e-mdu",
+	3728:  "e-woa",
+	3729:  "fksp-audit",
+	3730:  "client-ctrl",
+	3731:  "smap",
+	3732:  "m-wnn",
+	3733:  "multip-msg",
+	3734:  "synel-data",
+	3735:  "pwdis",
+	3736:  "rs-rmi",
+	3737:  "xpanel",
+	3738:  "versatalk",
+	3739:  "launchbird-lm",
+	3740:  "heartbeat",
+	3741:  "wysdma",
+	3742:  "cst-port",
+	3743:  "ipcs-command",
+	3744:  "sasg",
+	3745:  "gw-call-port",
+	3746:  "linktest",
+	3747:  "linktest-s",
+	3748:  "webdata",
+	3749:  "cimtrak",
+	3750:  "cbos-ip-port",
+	3751:  "gprs-cube",
+	3752:  "vipremoteagent",
+	3753:  "nattyserver",
+	3754:  "timestenbroker",
+	3755:  "sas-remote-hlp",
+	3756:  "canon-capt",
+	3757:  "grf-port",
+	3758:  "apw-registry",
+	3759:  "exapt-lmgr",
+	3760:  "adtempusclient",
+	3761:  "gsakmp",
+	3762:  "gbs-smp",
+	3763:  "xo-wave",
+	3764:  "mni-prot-rout",
+	3765:  "rtraceroute",
+	3766:  "sitewatch-s",
+	3767:  "listmgr-port",
+	3768:  "rblcheckd",
+	3769:  "haipe-otnk",
+	3770:  "cindycollab",
+	3771:  "paging-port",
+	3772:  "ctp",
+	3773:  "ctdhercules",
+	3774:  "zicom",
+	3775:  "ispmmgr",
+	3776:  "dvcprov-port",
+	3777:  "jibe-eb",
+	3778:  "c-h-it-port",
+	3779:  "cognima",
+	3780:  "nnp",
+	3781:  "abcvoice-port",
+	3782:  "iso-tp0s",
+	3783:  "bim-pem",
+	3784:  "bfd-control",
+	3785:  "bfd-echo",
+	3786:  "upstriggervsw",
+	3787:  "fintrx",
+	3788:  "isrp-port",
+	3789:  "remotedeploy",
+	3790:  "quickbooksrds",
+	3791:  "tvnetworkvideo",
+	3792:  "sitewatch",
+	3793:  "dcsoftware",
+	3794:  "jaus",
+	3795:  "myblast",
+	3796:  "spw-dialer",
+	3797:  "idps",
+	3798:  "minilock",
+	3799:  "radius-dynauth",
+	3800:  "pwgpsi",
+	3801:  "ibm-mgr",
+	3802:  "vhd",
+	3803:  "soniqsync",
+	3804:  "iqnet-port",
+	3805:  "tcpdataserver",
+	3806:  "wsmlb",
+	3807:  "spugna",
+	3808:  "sun-as-iiops-ca",
+	3809:  "apocd",
+	3810:  "wlanauth",
+	3811:  "amp",
+	3812:  "neto-wol-server",
+	3813:  "rap-ip",
+	3814:  "neto-dcs",
+	3815:  "lansurveyorxml",
+	3816:  "sunlps-http",
+	3817:  "tapeware",
+	3818:  "crinis-hb",
+	3819:  "epl-slp",
+	3820:  "scp",
+	3821:  "pmcp",
+	3822:  "acp-discovery",
+	3823:  "acp-conduit",
+	3824:  "acp-policy",
+	3825:  "ffserver",
+	3826:  "warmux",
+	3827:  "netmpi",
+	3828:  "neteh",
+	3829:  "neteh-ext",
+	3830:  "cernsysmgmtagt",
+	3831:  "dvapps",
+	3832:  "xxnetserver",
+	3833:  "aipn-auth",
+	3834:  "spectardata",
+	3835:  "spectardb",
+	3836:  "markem-dcp",
+	3837:  "mkm-discovery",
+	3838:  "sos",
+	3839:  "amx-rms",
+	3840:  "flirtmitmir",
+	3841:  "shiprush-db-svr",
+	3842:  "nhci",
+	3843:  "quest-agent",
+	3844:  "rnm",
+	3845:  "v-one-spp",
+	3846:  "an-pcp",
+	3847:  "msfw-control",
+	3848:  "item",
+	3849:  "spw-dnspreload",
+	3850:  "qtms-bootstrap",
+	3851:  "spectraport",
+	3852:  "sse-app-config",
+	3853:  "sscan",
+	3854:  "stryker-com",
+	3855:  "opentrac",
+	3856:  "informer",
+	3857:  "trap-port",
+	3858:  "trap-port-mom",
+	3859:  "nav-port",
+	3860:  "sasp",
+	3861:  "winshadow-hd",
+	3862:  "giga-pocket",
+	3863:  "asap-tcp",
+	3864:  "asap-tcp-tls",
+	3865:  "xpl",
+	3866:  "dzdaemon",
+	3867:  "dzoglserver",
+	3868:  "diameter",
+	3869:  "ovsam-mgmt",
+	3870:  "ovsam-d-agent",
+	3871:  "avocent-adsap",
+	3872:  "oem-agent",
+	3873:  "fagordnc",
+	3874:  "sixxsconfig",
+	3875:  "pnbscada",
+	3876:  "dl-agent",
+	3877:  "xmpcr-interface",
+	3878:  "fotogcad",
+	3879:  "appss-lm",
+	3880:  "igrs",
+	3881:  "idac",
+	3882:  "msdts1",
+	3883:  "vrpn",
+	3884:  "softrack-meter",
+	3885:  "topflow-ssl",
+	3886:  "nei-management",
+	3887:  "ciphire-data",
+	3888:  "ciphire-serv",
+	3889:  "dandv-tester",
+	3890:  "ndsconnect",
+	3891:  "rtc-pm-port",
+	3892:  "pcc-image-port",
+	3893:  "cgi-starapi",
+	3894:  "syam-agent",
+	3895:  "syam-smc",
+	3896:  "sdo-tls",
+	3897:  "sdo-ssh",
+	3898:  "senip",
+	3899:  "itv-control",
+	3900:  "udt-os",
+	3901:  "nimsh",
+	3902:  "nimaux",
+	3903:  "charsetmgr",
+	3904:  "omnilink-port",
+	3905:  "mupdate",
+	3906:  "topovista-data",
+	3907:  "imoguia-port",
+	3908:  "hppronetman",
+	3909:  "surfcontrolcpa",
+	3910:  "prnrequest",
+	3911:  "prnstatus",
+	3912:  "gbmt-stars",
+	3913:  "listcrt-port",
+	3914:  "listcrt-port-2",
+	3915:  "agcat",
+	3916:  "wysdmc",
+	3917:  "aftmux",
+	3918:  "pktcablemmcops",
+	3919:  "hyperip",
+	3920:  "exasoftport1",
+	3921:  "herodotus-net",
+	3922:  "sor-update",
+	3923:  "symb-sb-port",
+	3924:  "mpl-gprs-port",
+	3925:  "zmp",
+	3926:  "winport",
+	3927:  "natdataservice",
+	3928:  "netboot-pxe",
+	3929:  "smauth-port",
+	3930:  "syam-webserver",
+	3931:  "msr-plugin-port",
+	3932:  "dyn-site",
+	3933:  "plbserve-port",
+	3934:  "sunfm-port",
+	3935:  "sdp-portmapper",
+	3936:  "mailprox",
+	3937:  "dvbservdsc",
+	3938:  "dbcontrol-agent",
+	3939:  "aamp",
+	3940:  "xecp-node",
+	3941:  "homeportal-web",
+	3942:  "srdp",
+	3943:  "tig",
+	3944:  "sops",
+	3945:  "emcads",
+	3946:  "backupedge",
+	3947:  "ccp",
+	3948:  "apdap",
+	3949:  "drip",
+	3950:  "namemunge",
+	3951:  "pwgippfax",
+	3952:  "i3-sessionmgr",
+	3953:  "xmlink-connect",
+	3954:  "adrep",
+	3955:  "p2pcommunity",
+	3956:  "gvcp",
+	3957:  "mqe-broker",
+	3958:  "mqe-agent",
+	3959:  "treehopper",
+	3960:  "bess",
+	3961:  "proaxess",
+	3962:  "sbi-agent",
+	3963:  "thrp",
+	3964:  "sasggprs",
+	3965:  "ati-ip-to-ncpe",
+	3966:  "bflckmgr",
+	3967:  "ppsms",
+	3968:  "ianywhere-dbns",
+	3969:  "landmarks",
+	3970:  "lanrevagent",
+	3971:  "lanrevserver",
+	3972:  "iconp",
+	3973:  "progistics",
+	3974:  "citysearch",
+	3975:  "airshot",
+	3976:  "opswagent",
+	3977:  "opswmanager",
+	3978:  "secure-cfg-svr",
+	3979:  "smwan",
+	3980:  "acms",
+	3981:  "starfish",
+	3982:  "eis",
+	3983:  "eisp",
+	3984:  "mapper-nodemgr",
+	3985:  "mapper-mapethd",
+	3986:  "mapper-ws-ethd",
+	3987:  "centerline",
+	3988:  "dcs-config",
+	3989:  "bv-queryengine",
+	3990:  "bv-is",
+	3991:  "bv-smcsrv",
+	3992:  "bv-ds",
+	3993:  "bv-agent",
+	3995:  "iss-mgmt-ssl",
+	3996:  "abcsoftware",
+	3997:  "agentsease-db",
+	3998:  "dnx",
+	3999:  "nvcnet",
+	4000:  "terabase",
+	4001:  "newoak",
+	4002:  "pxc-spvr-ft",
+	4003:  "pxc-splr-ft",
+	4004:  "pxc-roid",
+	4005:  "pxc-pin",
+	4006:  "pxc-spvr",
+	4007:  "pxc-splr",
+	4008:  "netcheque",
+	4009:  "chimera-hwm",
+	4010:  "samsung-unidex",
+	4011:  "altserviceboot",
+	4012:  "pda-gate",
+	4013:  "acl-manager",
+	4014:  "taiclock",
+	4015:  "talarian-mcast1",
+	4016:  "talarian-mcast2",
+	4017:  "talarian-mcast3",
+	4018:  "talarian-mcast4",
+	4019:  "talarian-mcast5",
+	4020:  "trap",
+	4021:  "nexus-portal",
+	4022:  "dnox",
+	4023:  "esnm-zoning",
+	4024:  "tnp1-port",
+	4025:  "partimage",
+	4026:  "as-debug",
+	4027:  "bxp",
+	4028:  "dtserver-port",
+	4029:  "ip-qsig",
+	4030:  "jdmn-port",
+	4031:  "suucp",
+	4032:  "vrts-auth-port",
+	4033:  "sanavigator",
+	4034:  "ubxd",
+	4035:  "wap-push-http",
+	4036:  "wap-push-https",
+	4037:  "ravehd",
+	4038:  "fazzt-ptp",
+	4039:  "fazzt-admin",
+	4040:  "yo-main",
+	4041:  "houston",
+	4042:  "ldxp",
+	4043:  "nirp",
+	4044:  "ltp",
+	4045:  "npp",
+	4046:  "acp-proto",
+	4047:  "ctp-state",
+	4049:  "wafs",
+	4050:  "cisco-wafs",
+	4051:  "cppdp",
+	4052:  "interact",
+	4053:  "ccu-comm-1",
+	4054:  "ccu-comm-2",
+	4055:  "ccu-comm-3",
+	4056:  "lms",
+	4057:  "wfm",
+	4058:  "kingfisher",
+	4059:  "dlms-cosem",
+	4060:  "dsmeter-iatc",
+	4061:  "ice-location",
+	4062:  "ice-slocation",
+	4063:  "ice-router",
+	4064:  "ice-srouter",
+	4065:  "avanti-cdp",
+	4066:  "pmas",
+	4067:  "idp",
+	4068:  "ipfltbcst",
+	4069:  "minger",
+	4070:  "tripe",
+	4071:  "aibkup",
+	4072:  "zieto-sock",
+	4073:  "iRAPP",
+	4074:  "cequint-cityid",
+	4075:  "perimlan",
+	4076:  "seraph",
+	4078:  "cssp",
+	4079:  "santools",
+	4080:  "lorica-in",
+	4081:  "lorica-in-sec",
+	4082:  "lorica-out",
+	4083:  "lorica-out-sec",
+	4085:  "ezmessagesrv",
+	4087:  "applusservice",
+	4088:  "npsp",
+	4089:  "opencore",
+	4090:  "omasgport",
+	4091:  "ewinstaller",
+	4092:  "ewdgs",
+	4093:  "pvxpluscs",
+	4094:  "sysrqd",
+	4095:  "xtgui",
+	4096:  "bre",
+	4097:  "patrolview",
+	4098:  "drmsfsd",
+	4099:  "dpcp",
+	4100:  "igo-incognito",
+	4101:  "brlp-0",
+	4102:  "brlp-1",
+	4103:  "brlp-2",
+	4104:  "brlp-3",
+	4105:  "shofar",
+	4106:  "synchronite",
+	4107:  "j-ac",
+	4108:  "accel",
+	4109:  "izm",
+	4110:  "g2tag",
+	4111:  "xgrid",
+	4112:  "apple-vpns-rp",
+	4113:  "aipn-reg",
+	4114:  "jomamqmonitor",
+	4115:  "cds",
+	4116:  "smartcard-tls",
+	4117:  "hillrserv",
+	4118:  "netscript",
+	4119:  "assuria-slm",
+	4120:  "minirem",
+	4121:  "e-builder",
+	4122:  "fprams",
+	4123:  "z-wave",
+	4124:  "tigv2",
+	4125:  "opsview-envoy",
+	4126:  "ddrepl",
+	4127:  "unikeypro",
+	4128:  "nufw",
+	4129:  "nuauth",
+	4130:  "fronet",
+	4131:  "stars",
+	4132:  "nuts-dem",
+	4133:  "nuts-bootp",
+	4134:  "nifty-hmi",
+	4135:  "cl-db-attach",
+	4136:  "cl-db-request",
+	4137:  "cl-db-remote",
+	4138:  "nettest",
+	4139:  "thrtx",
+	4140:  "cedros-fds",
+	4141:  "oirtgsvc",
+	4142:  "oidocsvc",
+	4143:  "oidsr",
+	4145:  "vvr-control",
+	4146:  "tgcconnect",
+	4147:  "vrxpservman",
+	4148:  "hhb-handheld",
+	4149:  "agslb",
+	4150:  "PowerAlert-nsa",
+	4151:  "menandmice-noh",
+	4152:  "idig-mux",
+	4153:  "mbl-battd",
+	4154:  "atlinks",
+	4155:  "bzr",
+	4156:  "stat-results",
+	4157:  "stat-scanner",
+	4158:  "stat-cc",
+	4159:  "nss",
+	4160:  "jini-discovery",
+	4161:  "omscontact",
+	4162:  "omstopology",
+	4163:  "silverpeakpeer",
+	4164:  "silverpeakcomm",
+	4165:  "altcp",
+	4166:  "joost",
+	4167:  "ddgn",
+	4168:  "pslicser",
+	4169:  "iadt",
+	4170:  "d-cinema-csp",
+	4171:  "ml-svnet",
+	4172:  "pcoip",
+	4174:  "smcluster",
+	4175:  "bccp",
+	4176:  "tl-ipcproxy",
+	4177:  "wello",
+	4178:  "storman",
+	4179:  "MaxumSP",
+	4180:  "httpx",
+	4181:  "macbak",
+	4182:  "pcptcpservice",
+	4183:  "cyborgnet",
+	4184:  "universe-suite",
+	4185:  "wcpp",
+	4186:  "boxbackupstore",
+	4187:  "csc-proxy",
+	4188:  "vatata",
+	4189:  "pcep",
+	4190:  "sieve",
+	4192:  "azeti",
+	4193:  "pvxplusio",
+	4197:  "hctl",
+	4199:  "eims-admin",
+	4300:  "corelccam",
+	4301:  "d-data",
+	4302:  "d-data-control",
+	4303:  "srcp",
+	4304:  "owserver",
+	4305:  "batman",
+	4306:  "pinghgl",
+	4307:  "trueconf",
+	4308:  "compx-lockview",
+	4309:  "dserver",
+	4310:  "mirrtex",
+	4311:  "p6ssmc",
+	4312:  "pscl-mgt",
+	4313:  "perrla",
+	4314:  "choiceview-agt",
+	4316:  "choiceview-clt",
+	4320:  "fdt-rcatp",
+	4321:  "rwhois",
+	4322:  "trim-event",
+	4323:  "trim-ice",
+	4325:  "geognosisman",
+	4326:  "geognosis",
+	4327:  "jaxer-web",
+	4328:  "jaxer-manager",
+	4329:  "publiqare-sync",
+	4330:  "dey-sapi",
+	4331:  "ktickets-rest",
+	4333:  "ahsp",
+	4334:  "netconf-ch-ssh",
+	4335:  "netconf-ch-tls",
+	4336:  "restconf-ch-tls",
+	4340:  "gaia",
+	4341:  "lisp-data",
+	4342:  "lisp-cons",
+	4343:  "unicall",
+	4344:  "vinainstall",
+	4345:  "m4-network-as",
+	4346:  "elanlm",
+	4347:  "lansurveyor",
+	4348:  "itose",
+	4349:  "fsportmap",
+	4350:  "net-device",
+	4351:  "plcy-net-svcs",
+	4352:  "pjlink",
+	4353:  "f5-iquery",
+	4354:  "qsnet-trans",
+	4355:  "qsnet-workst",
+	4356:  "qsnet-assist",
+	4357:  "qsnet-cond",
+	4358:  "qsnet-nucl",
+	4359:  "omabcastltkm",
+	4360:  "matrix-vnet",
+	4368:  "wxbrief",
+	4369:  "epmd",
+	4370:  "elpro-tunnel",
+	4371:  "l2c-control",
+	4372:  "l2c-data",
+	4373:  "remctl",
+	4374:  "psi-ptt",
+	4375:  "tolteces",
+	4376:  "bip",
+	4377:  "cp-spxsvr",
+	4378:  "cp-spxdpy",
+	4379:  "ctdb",
+	4389:  "xandros-cms",
+	4390:  "wiegand",
+	4391:  "apwi-imserver",
+	4392:  "apwi-rxserver",
+	4393:  "apwi-rxspooler",
+	4395:  "omnivisionesx",
+	4396:  "fly",
+	4400:  "ds-srv",
+	4401:  "ds-srvr",
+	4402:  "ds-clnt",
+	4403:  "ds-user",
+	4404:  "ds-admin",
+	4405:  "ds-mail",
+	4406:  "ds-slp",
+	4407:  "nacagent",
+	4408:  "slscc",
+	4409:  "netcabinet-com",
+	4410:  "itwo-server",
+	4411:  "found",
+	4413:  "avi-nms",
+	4414:  "updog",
+	4415:  "brcd-vr-req",
+	4416:  "pjj-player",
+	4417:  "workflowdir",
+	4419:  "cbp",
+	4420:  "nvm-express",
+	4421:  "scaleft",
+	4422:  "tsepisp",
+	4423:  "thingkit",
+	4425:  "netrockey6",
+	4426:  "beacon-port-2",
+	4427:  "drizzle",
+	4428:  "omviserver",
+	4429:  "omviagent",
+	4430:  "rsqlserver",
+	4431:  "wspipe",
+	4432:  "l-acoustics",
+	4433:  "vop",
+	4442:  "saris",
+	4443:  "pharos",
+	4444:  "krb524",
+	4445:  "upnotifyp",
+	4446:  "n1-fwp",
+	4447:  "n1-rmgmt",
+	4448:  "asc-slmd",
+	4449:  "privatewire",
+	4450:  "camp",
+	4451:  "ctisystemmsg",
+	4452:  "ctiprogramload",
+	4453:  "nssalertmgr",
+	4454:  "nssagentmgr",
+	4455:  "prchat-user",
+	4456:  "prchat-server",
+	4457:  "prRegister",
+	4458:  "mcp",
+	4484:  "hpssmgmt",
+	4485:  "assyst-dr",
+	4486:  "icms",
+	4487:  "prex-tcp",
+	4488:  "awacs-ice",
+	4500:  "ipsec-nat-t",
+	4535:  "ehs",
+	4536:  "ehs-ssl",
+	4537:  "wssauthsvc",
+	4538:  "swx-gate",
+	4545:  "worldscores",
+	4546:  "sf-lm",
+	4547:  "lanner-lm",
+	4548:  "synchromesh",
+	4549:  "aegate",
+	4550:  "gds-adppiw-db",
+	4551:  "ieee-mih",
+	4552:  "menandmice-mon",
+	4553:  "icshostsvc",
+	4554:  "msfrs",
+	4555:  "rsip",
+	4556:  "dtn-bundle",
+	4559:  "hylafax",
+	4563:  "amahi-anywhere",
+	4566:  "kwtc",
+	4567:  "tram",
+	4568:  "bmc-reporting",
+	4569:  "iax",
+	4570:  "deploymentmap",
+	4573:  "cardifftec-back",
+	4590:  "rid",
+	4591:  "l3t-at-an",
+	4593:  "ipt-anri-anri",
+	4594:  "ias-session",
+	4595:  "ias-paging",
+	4596:  "ias-neighbor",
+	4597:  "a21-an-1xbs",
+	4598:  "a16-an-an",
+	4599:  "a17-an-an",
+	4600:  "piranha1",
+	4601:  "piranha2",
+	4602:  "mtsserver",
+	4603:  "menandmice-upg",
+	4604:  "irp",
+	4605:  "sixchat",
+	4658:  "playsta2-app",
+	4659:  "playsta2-lob",
+	4660:  "smaclmgr",
+	4661:  "kar2ouche",
+	4662:  "oms",
+	4663:  "noteit",
+	4664:  "ems",
+	4665:  "contclientms",
+	4666:  "eportcomm",
+	4667:  "mmacomm",
+	4668:  "mmaeds",
+	4669:  "eportcommdata",
+	4670:  "light",
+	4671:  "acter",
+	4672:  "rfa",
+	4673:  "cxws",
+	4674:  "appiq-mgmt",
+	4675:  "dhct-status",
+	4676:  "dhct-alerts",
+	4677:  "bcs",
+	4678:  "traversal",
+	4679:  "mgesupervision",
+	4680:  "mgemanagement",
+	4681:  "parliant",
+	4682:  "finisar",
+	4683:  "spike",
+	4684:  "rfid-rp1",
+	4685:  "autopac",
+	4686:  "msp-os",
+	4687:  "nst",
+	4688:  "mobile-p2p",
+	4689:  "altovacentral",
+	4690:  "prelude",
+	4691:  "mtn",
+	4692:  "conspiracy",
+	4700:  "netxms-agent",
+	4701:  "netxms-mgmt",
+	4702:  "netxms-sync",
+	4703:  "npqes-test",
+	4704:  "assuria-ins",
+	4711:  "trinity-dist",
+	4725:  "truckstar",
+	4727:  "fcis",
+	4728:  "capmux",
+	4730:  "gearman",
+	4731:  "remcap",
+	4733:  "resorcs",
+	4737:  "ipdr-sp",
+	4738:  "solera-lpn",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	4741:  "lumimgrd",
+	4742:  "sicct",
+	4743:  "openhpid",
+	4744:  "ifsp",
+	4745:  "fmp",
+	4749:  "profilemac",
+	4750:  "ssad",
+	4751:  "spocp",
+	4752:  "snap",
+	4753:  "simon",
+	4756:  "RDCenter",
+	4774:  "converge",
+	4784:  "bfd-multi-ctl",
+	4786:  "smart-install",
+	4787:  "sia-ctrl-plane",
+	4788:  "xmcp",
+	4800:  "iims",
+	4801:  "iwec",
+	4802:  "ilss",
+	4803:  "notateit",
+	4827:  "htcp",
+	4837:  "varadero-0",
+	4838:  "varadero-1",
+	4839:  "varadero-2",
+	4840:  "opcua-tcp",
+	4841:  "quosa",
+	4842:  "gw-asv",
+	4843:  "opcua-tls",
+	4844:  "gw-log",
+	4845:  "wcr-remlib",
+	4846:  "contamac-icm",
+	4847:  "wfc",
+	4848:  "appserv-http",
+	4849:  "appserv-https",
+	4850:  "sun-as-nodeagt",
+	4851:  "derby-repli",
+	4867:  "unify-debug",
+	4868:  "phrelay",
+	4869:  "phrelaydbg",
+	4870:  "cc-tracking",
+	4871:  "wired",
+	4876:  "tritium-can",
+	4877:  "lmcs",
+	4879:  "wsdl-event",
+	4880:  "hislip",
+	4883:  "wmlserver",
+	4884:  "hivestor",
+	4885:  "abbs",
+	4894:  "lyskom",
+	4899:  "radmin-port",
+	4900:  "hfcs",
+	4901:  "flr-agent",
+	4902:  "magiccontrol",
+	4912:  "lutap",
+	4913:  "lutcp",
+	4914:  "bones",
+	4915:  "frcs",
+	4940:  "eq-office-4940",
+	4941:  "eq-office-4941",
+	4942:  "eq-office-4942",
+	4949:  "munin",
+	4950:  "sybasesrvmon",
+	4951:  "pwgwims",
+	4952:  "sagxtsds",
+	4953:  "dbsyncarbiter",
+	4969:  "ccss-qmm",
+	4970:  "ccss-qsm",
+	4971:  "burp",
+	4984:  "webyast",
+	4985:  "gerhcs",
+	4986:  "mrip",
+	4987:  "smar-se-port1",
+	4988:  "smar-se-port2",
+	4989:  "parallel",
+	4990:  "busycal",
+	4991:  "vrt",
+	4999:  "hfcs-manager",
+	5000:  "commplex-main",
+	5001:  "commplex-link",
+	5002:  "rfe",
+	5003:  "fmpro-internal",
+	5004:  "avt-profile-1",
+	5005:  "avt-profile-2",
+	5006:  "wsm-server",
+	5007:  "wsm-server-ssl",
+	5008:  "synapsis-edge",
+	5009:  "winfs",
+	5010:  "telelpathstart",
+	5011:  "telelpathattack",
+	5012:  "nsp",
+	5013:  "fmpro-v6",
+	5015:  "fmwp",
+	5020:  "zenginkyo-1",
+	5021:  "zenginkyo-2",
+	5022:  "mice",
+	5023:  "htuilsrv",
+	5024:  "scpi-telnet",
+	5025:  "scpi-raw",
+	5026:  "strexec-d",
+	5027:  "strexec-s",
+	5028:  "qvr",
+	5029:  "infobright",
+	5030:  "surfpass",
+	5032:  "signacert-agent",
+	5033:  "jtnetd-server",
+	5034:  "jtnetd-status",
+	5042:  "asnaacceler8db",
+	5043:  "swxadmin",
+	5044:  "lxi-evntsvc",
+	5045:  "osp",
+	5048:  "texai",
+	5049:  "ivocalize",
+	5050:  "mmcc",
+	5051:  "ita-agent",
+	5052:  "ita-manager",
+	5053:  "rlm",
+	5054:  "rlm-admin",
+	5055:  "unot",
+	5056:  "intecom-ps1",
+	5057:  "intecom-ps2",
+	5059:  "sds",
+	5060:  "sip",
+	5061:  "sips",
+	5062:  "na-localise",
+	5063:  "csrpc",
+	5064:  "ca-1",
+	5065:  "ca-2",
+	5066:  "stanag-5066",
+	5067:  "authentx",
+	5068:  "bitforestsrv",
+	5069:  "i-net-2000-npr",
+	5070:  "vtsas",
+	5071:  "powerschool",
+	5072:  "ayiya",
+	5073:  "tag-pm",
+	5074:  "alesquery",
+	5075:  "pvaccess",
+	5080:  "onscreen",
+	5081:  "sdl-ets",
+	5082:  "qcp",
+	5083:  "qfp",
+	5084:  "llrp",
+	5085:  "encrypted-llrp",
+	5086:  "aprigo-cs",
+	5087:  "biotic",
+	5093:  "sentinel-lm",
+	5094:  "hart-ip",
+	5099:  "sentlm-srv2srv",
+	5100:  "socalia",
+	5101:  "talarian-tcp",
+	5102:  "oms-nonsecure",
+	5103:  "actifio-c2c",
+	5106:  "actifioudsagent",
+	5107:  "actifioreplic",
+	5111:  "taep-as-svc",
+	5112:  "pm-cmdsvr",
+	5114:  "ev-services",
+	5115:  "autobuild",
+	5117:  "gradecam",
+	5120:  "barracuda-bbs",
+	5133:  "nbt-pc",
+	5134:  "ppactivation",
+	5135:  "erp-scale",
+	5137:  "ctsd",
+	5145:  "rmonitor-secure",
+	5146:  "social-alarm",
+	5150:  "atmp",
+	5151:  "esri-sde",
+	5152:  "sde-discovery",
+	5153:  "toruxserver",
+	5154:  "bzflag",
+	5155:  "asctrl-agent",
+	5156:  "rugameonline",
+	5157:  "mediat",
+	5161:  "snmpssh",
+	5162:  "snmpssh-trap",
+	5163:  "sbackup",
+	5164:  "vpa",
+	5165:  "ife-icorp",
+	5166:  "winpcs",
+	5167:  "scte104",
+	5168:  "scte30",
+	5172:  "pcoip-mgmt",
+	5190:  "aol",
+	5191:  "aol-1",
+	5192:  "aol-2",
+	5193:  "aol-3",
+	5194:  "cpscomm",
+	5195:  "ampl-lic",
+	5196:  "ampl-tableproxy",
+	5197:  "tunstall-lwp",
+	5200:  "targus-getdata",
+	5201:  "targus-getdata1",
+	5202:  "targus-getdata2",
+	5203:  "targus-getdata3",
+	5209:  "nomad",
+	5215:  "noteza",
+	5221:  "3exmp",
+	5222:  "xmpp-client",
+	5223:  "hpvirtgrp",
+	5224:  "hpvirtctrl",
+	5225:  "hp-server",
+	5226:  "hp-status",
+	5227:  "perfd",
+	5228:  "hpvroom",
+	5229:  "jaxflow",
+	5230:  "jaxflow-data",
+	5231:  "crusecontrol",
+	5232:  "csedaemon",
+	5233:  "enfs",
+	5234:  "eenet",
+	5235:  "galaxy-network",
+	5236:  "padl2sim",
+	5237:  "mnet-discovery",
+	5245:  "downtools",
+	5248:  "caacws",
+	5249:  "caaclang2",
+	5250:  "soagateway",
+	5251:  "caevms",
+	5252:  "movaz-ssc",
+	5253:  "kpdp",
+	5254:  "logcabin",
+	5264:  "3com-njack-1",
+	5265:  "3com-njack-2",
+	5269:  "xmpp-server",
+	5270:  "cartographerxmp",
+	5271:  "cuelink",
+	5272:  "pk",
+	5280:  "xmpp-bosh",
+	5281:  "undo-lm",
+	5282:  "transmit-port",
+	5298:  "presence",
+	5299:  "nlg-data",
+	5300:  "hacl-hb",
+	5301:  "hacl-gs",
+	5302:  "hacl-cfg",
+	5303:  "hacl-probe",
+	5304:  "hacl-local",
+	5305:  "hacl-test",
+	5306:  "sun-mc-grp",
+	5307:  "sco-aip",
+	5308:  "cfengine",
+	5309:  "jprinter",
+	5310:  "outlaws",
+	5312:  "permabit-cs",
+	5313:  "rrdp",
+	5314:  "opalis-rbt-ipc",
+	5315:  "hacl-poll",
+	5316:  "hpbladems",
+	5317:  "hpdevms",
+	5318:  "pkix-cmc",
+	5320:  "bsfserver-zn",
+	5321:  "bsfsvr-zn-ssl",
+	5343:  "kfserver",
+	5344:  "xkotodrcp",
+	5349:  "stuns",
+	5352:  "dns-llq",
+	5353:  "mdns",
+	5354:  "mdnsresponder",
+	5355:  "llmnr",
+	5356:  "ms-smlbiz",
+	5357:  "wsdapi",
+	5358:  "wsdapi-s",
+	5359:  "ms-alerter",
+	5360:  "ms-sideshow",
+	5361:  "ms-s-sideshow",
+	5362:  "serverwsd2",
+	5363:  "net-projection",
+	5397:  "stresstester",
+	5398:  "elektron-admin",
+	5399:  "securitychase",
+	5400:  "excerpt",
+	5401:  "excerpts",
+	5402:  "mftp",
+	5403:  "hpoms-ci-lstn",
+	5404:  "hpoms-dps-lstn",
+	5405:  "netsupport",
+	5406:  "systemics-sox",
+	5407:  "foresyte-clear",
+	5408:  "foresyte-sec",
+	5409:  "salient-dtasrv",
+	5410:  "salient-usrmgr",
+	5411:  "actnet",
+	5412:  "continuus",
+	5413:  "wwiotalk",
+	5414:  "statusd",
+	5415:  "ns-server",
+	5416:  "sns-gateway",
+	5417:  "sns-agent",
+	5418:  "mcntp",
+	5419:  "dj-ice",
+	5420:  "cylink-c",
+	5421:  "netsupport2",
+	5422:  "salient-mux",
+	5423:  "virtualuser",
+	5424:  "beyond-remote",
+	5425:  "br-channel",
+	5426:  "devbasic",
+	5427:  "sco-peer-tta",
+	5428:  "telaconsole",
+	5429:  "base",
+	5430:  "radec-corp",
+	5431:  "park-agent",
+	5432:  "postgresql",
+	5433:  "pyrrho",
+	5434:  "sgi-arrayd",
+	5435:  "sceanics",
+	5443:  "spss",
+	5445:  "smbdirect",
+	5450:  "tiepie",
+	5453:  "surebox",
+	5454:  "apc-5454",
+	5455:  "apc-5455",
+	5456:  "apc-5456",
+	5461:  "silkmeter",
+	5462:  "ttl-publisher",
+	5463:  "ttlpriceproxy",
+	5464:  "quailnet",
+	5465:  "netops-broker",
+	5470:  "apsolab-col",
+	5471:  "apsolab-cols",
+	5472:  "apsolab-tag",
+	5473:  "apsolab-tags",
+	5475:  "apsolab-data",
+	5500:  "fcp-addr-srvr1",
+	5501:  "fcp-addr-srvr2",
+	5502:  "fcp-srvr-inst1",
+	5503:  "fcp-srvr-inst2",
+	5504:  "fcp-cics-gw1",
+	5505:  "checkoutdb",
+	5506:  "amc",
+	5507:  "psl-management",
+	5550:  "cbus",
+	5553:  "sgi-eventmond",
+	5554:  "sgi-esphttp",
+	5555:  "personal-agent",
+	5556:  "freeciv",
+	5557:  "farenet",
+	5565:  "hpe-dp-bura",
+	5566:  "westec-connect",
+	5567:  "dof-dps-mc-sec",
+	5568:  "sdt",
+	5569:  "rdmnet-ctrl",
+	5573:  "sdmmp",
+	5574:  "lsi-bobcat",
+	5575:  "ora-oap",
+	5579:  "fdtracks",
+	5580:  "tmosms0",
+	5581:  "tmosms1",
+	5582:  "fac-restore",
+	5583:  "tmo-icon-sync",
+	5584:  "bis-web",
+	5585:  "bis-sync",
+	5586:  "att-mt-sms",
+	5597:  "ininmessaging",
+	5598:  "mctfeed",
+	5599:  "esinstall",
+	5600:  "esmmanager",
+	5601:  "esmagent",
+	5602:  "a1-msc",
+	5603:  "a1-bs",
+	5604:  "a3-sdunode",
+	5605:  "a4-sdunode",
+	5618:  "efr",
+	5627:  "ninaf",
+	5628:  "htrust",
+	5629:  "symantec-sfdb",
+	5630:  "precise-comm",
+	5631:  "pcanywheredata",
+	5632:  "pcanywherestat",
+	5633:  "beorl",
+	5634:  "xprtld",
+	5635:  "sfmsso",
+	5636:  "sfm-db-server",
+	5637:  "cssc",
+	5638:  "flcrs",
+	5639:  "ics",
+	5646:  "vfmobile",
+	5666:  "nrpe",
+	5670:  "filemq",
+	5671:  "amqps",
+	5672:  "amqp",
+	5673:  "jms",
+	5674:  "hyperscsi-port",
+	5675:  "v5ua",
+	5676:  "raadmin",
+	5677:  "questdb2-lnchr",
+	5678:  "rrac",
+	5679:  "dccm",
+	5680:  "auriga-router",
+	5681:  "ncxcp",
+	5688:  "ggz",
+	5689:  "qmvideo",
+	5693:  "rbsystem",
+	5696:  "kmip",
+	5700:  "supportassist",
+	5705:  "storageos",
+	5713:  "proshareaudio",
+	5714:  "prosharevideo",
+	5715:  "prosharedata",
+	5716:  "prosharerequest",
+	5717:  "prosharenotify",
+	5718:  "dpm",
+	5719:  "dpm-agent",
+	5720:  "ms-licensing",
+	5721:  "dtpt",
+	5722:  "msdfsr",
+	5723:  "omhs",
+	5724:  "omsdk",
+	5725:  "ms-ilm",
+	5726:  "ms-ilm-sts",
+	5727:  "asgenf",
+	5728:  "io-dist-data",
+	5729:  "openmail",
+	5730:  "unieng",
+	5741:  "ida-discover1",
+	5742:  "ida-discover2",
+	5743:  "watchdoc-pod",
+	5744:  "watchdoc",
+	5745:  "fcopy-server",
+	5746:  "fcopys-server",
+	5747:  "tunatic",
+	5748:  "tunalyzer",
+	5750:  "rscd",
+	5755:  "openmailg",
+	5757:  "x500ms",
+	5766:  "openmailns",
+	5767:  "s-openmail",
+	5768:  "openmailpxy",
+	5769:  "spramsca",
+	5770:  "spramsd",
+	5771:  "netagent",
+	5777:  "dali-port",
+	5780:  "vts-rpc",
+	5781:  "3par-evts",
+	5782:  "3par-mgmt",
+	5783:  "3par-mgmt-ssl",
+	5785:  "3par-rcopy",
+	5793:  "xtreamx",
+	5813:  "icmpd",
+	5814:  "spt-automation",
+	5841:  "shiprush-d-ch",
+	5842:  "reversion",
+	5859:  "wherehoo",
+	5863:  "ppsuitemsg",
+	5868:  "diameters",
+	5883:  "jute",
+	5900:  "rfb",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	5963:  "indy",
+	5968:  "mppolicy-v5",
+	5969:  "mppolicy-mgr",
+	5984:  "couchdb",
+	5985:  "wsman",
+	5986:  "wsmans",
+	5987:  "wbem-rmi",
+	5988:  "wbem-http",
+	5989:  "wbem-https",
+	5990:  "wbem-exp-https",
+	5991:  "nuxsl",
+	5992:  "consul-insight",
+	5993:  "cim-rs",
+	5999:  "cvsup",
+	6064:  "ndl-ahp-svc",
+	6065:  "winpharaoh",
+	6066:  "ewctsp",
+	6068:  "gsmp-ancp",
+	6069:  "trip",
+	6070:  "messageasap",
+	6071:  "ssdtp",
+	6072:  "diagnose-proc",
+	6073:  "directplay8",
+	6074:  "max",
+	6075:  "dpm-acm",
+	6076:  "msft-dpm-cert",
+	6077:  "iconstructsrv",
+	6084:  "reload-config",
+	6085:  "konspire2b",
+	6086:  "pdtp",
+	6087:  "ldss",
+	6088:  "doglms",
+	6099:  "raxa-mgmt",
+	6100:  "synchronet-db",
+	6101:  "synchronet-rtc",
+	6102:  "synchronet-upd",
+	6103:  "rets",
+	6104:  "dbdb",
+	6105:  "primaserver",
+	6106:  "mpsserver",
+	6107:  "etc-control",
+	6108:  "sercomm-scadmin",
+	6109:  "globecast-id",
+	6110:  "softcm",
+	6111:  "spc",
+	6112:  "dtspcd",
+	6113:  "dayliteserver",
+	6114:  "wrspice",
+	6115:  "xic",
+	6116:  "xtlserv",
+	6117:  "daylitetouch",
+	6121:  "spdy",
+	6122:  "bex-webadmin",
+	6123:  "backup-express",
+	6124:  "pnbs",
+	6130:  "damewaremobgtwy",
+	6133:  "nbt-wol",
+	6140:  "pulsonixnls",
+	6141:  "meta-corp",
+	6142:  "aspentec-lm",
+	6143:  "watershed-lm",
+	6144:  "statsci1-lm",
+	6145:  "statsci2-lm",
+	6146:  "lonewolf-lm",
+	6147:  "montage-lm",
+	6148:  "ricardo-lm",
+	6149:  "tal-pod",
+	6159:  "efb-aci",
+	6160:  "ecmp",
+	6161:  "patrol-ism",
+	6162:  "patrol-coll",
+	6163:  "pscribe",
+	6200:  "lm-x",
+	6209:  "qmtps",
+	6222:  "radmind",
+	6241:  "jeol-nsdtp-1",
+	6242:  "jeol-nsdtp-2",
+	6243:  "jeol-nsdtp-3",
+	6244:  "jeol-nsdtp-4",
+	6251:  "tl1-raw-ssl",
+	6252:  "tl1-ssh",
+	6253:  "crip",
+	6267:  "gld",
+	6268:  "grid",
+	6269:  "grid-alt",
+	6300:  "bmc-grx",
+	6301:  "bmc-ctd-ldap",
+	6306:  "ufmp",
+	6315:  "scup",
+	6316:  "abb-escp",
+	6317:  "nav-data-cmd",
+	6320:  "repsvc",
+	6321:  "emp-server1",
+	6322:  "emp-server2",
+	6324:  "hrd-ncs",
+	6325:  "dt-mgmtsvc",
+	6326:  "dt-vra",
+	6343:  "sflow",
+	6344:  "streletz",
+	6346:  "gnutella-svc",
+	6347:  "gnutella-rtr",
+	6350:  "adap",
+	6355:  "pmcs",
+	6360:  "metaedit-mu",
+	6370:  "metaedit-se",
+	6379:  "redis",
+	6382:  "metatude-mds",
+	6389:  "clariion-evr01",
+	6390:  "metaedit-ws",
+	6417:  "faxcomservice",
+	6418:  "syserverremote",
+	6419:  "svdrp",
+	6420:  "nim-vdrshell",
+	6421:  "nim-wan",
+	6432:  "pgbouncer",
+	6442:  "tarp",
+	6443:  "sun-sr-https",
+	6444:  "sge-qmaster",
+	6445:  "sge-execd",
+	6446:  "mysql-proxy",
+	6455:  "skip-cert-recv",
+	6456:  "skip-cert-send",
+	6464:  "ieee11073-20701",
+	6471:  "lvision-lm",
+	6480:  "sun-sr-http",
+	6481:  "servicetags",
+	6482:  "ldoms-mgmt",
+	6483:  "SunVTS-RMI",
+	6484:  "sun-sr-jms",
+	6485:  "sun-sr-iiop",
+	6486:  "sun-sr-iiops",
+	6487:  "sun-sr-iiop-aut",
+	6488:  "sun-sr-jmx",
+	6489:  "sun-sr-admin",
+	6500:  "boks",
+	6501:  "boks-servc",
+	6502:  "boks-servm",
+	6503:  "boks-clntd",
+	6505:  "badm-priv",
+	6506:  "badm-pub",
+	6507:  "bdir-priv",
+	6508:  "bdir-pub",
+	6509:  "mgcs-mfp-port",
+	6510:  "mcer-port",
+	6513:  "netconf-tls",
+	6514:  "syslog-tls",
+	6515:  "elipse-rec",
+	6543:  "lds-distrib",
+	6544:  "lds-dump",
+	6547:  "apc-6547",
+	6548:  "apc-6548",
+	6549:  "apc-6549",
+	6550:  "fg-sysupdate",
+	6551:  "sum",
+	6558:  "xdsxdm",
+	6566:  "sane-port",
+	6568:  "canit-store",
+	6579:  "affiliate",
+	6580:  "parsec-master",
+	6581:  "parsec-peer",
+	6582:  "parsec-game",
+	6583:  "joaJewelSuite",
+	6600:  "mshvlm",
+	6601:  "mstmg-sstp",
+	6602:  "wsscomfrmwk",
+	6619:  "odette-ftps",
+	6620:  "kftp-data",
+	6621:  "kftp",
+	6622:  "mcftp",
+	6623:  "ktelnet",
+	6624:  "datascaler-db",
+	6625:  "datascaler-ctl",
+	6626:  "wago-service",
+	6627:  "nexgen",
+	6628:  "afesc-mc",
+	6629:  "nexgen-aux",
+	6632:  "mxodbc-connect",
+	6640:  "ovsdb",
+	6653:  "openflow",
+	6655:  "pcs-sf-ui-man",
+	6656:  "emgmsg",
+	6670:  "vocaltec-gold",
+	6671:  "p4p-portal",
+	6672:  "vision-server",
+	6673:  "vision-elmd",
+	6678:  "vfbp",
+	6679:  "osaut",
+	6687:  "clever-ctrace",
+	6688:  "clever-tcpip",
+	6689:  "tsa",
+	6690:  "cleverdetect",
+	6697:  "ircs-u",
+	6701:  "kti-icad-srvr",
+	6702:  "e-design-net",
+	6703:  "e-design-web",
+	6714:  "ibprotocol",
+	6715:  "fibotrader-com",
+	6716:  "princity-agent",
+	6767:  "bmc-perf-agent",
+	6768:  "bmc-perf-mgrd",
+	6769:  "adi-gxp-srvprt",
+	6770:  "plysrv-http",
+	6771:  "plysrv-https",
+	6777:  "ntz-tracker",
+	6778:  "ntz-p2p-storage",
+	6785:  "dgpf-exchg",
+	6786:  "smc-jmx",
+	6787:  "smc-admin",
+	6788:  "smc-http",
+	6789:  "radg",
+	6790:  "hnmp",
+	6791:  "hnm",
+	6801:  "acnet",
+	6817:  "pentbox-sim",
+	6831:  "ambit-lm",
+	6841:  "netmo-default",
+	6842:  "netmo-http",
+	6850:  "iccrushmore",
+	6868:  "acctopus-cc",
+	6888:  "muse",
+	6900:  "rtimeviewer",
+	6901:  "jetstream",
+	6935:  "ethoscan",
+	6936:  "xsmsvc",
+	6946:  "bioserver",
+	6951:  "otlp",
+	6961:  "jmact3",
+	6962:  "jmevt2",
+	6963:  "swismgr1",
+	6964:  "swismgr2",
+	6965:  "swistrap",
+	6966:  "swispol",
+	6969:  "acmsoda",
+	6970:  "conductor",
+	6997:  "MobilitySrv",
+	6998:  "iatp-highpri",
+	6999:  "iatp-normalpri",
+	7000:  "afs3-fileserver",
+	7001:  "afs3-callback",
+	7002:  "afs3-prserver",
+	7003:  "afs3-vlserver",
+	7004:  "afs3-kaserver",
+	7005:  "afs3-volser",
+	7006:  "afs3-errors",
+	7007:  "afs3-bos",
+	7008:  "afs3-update",
+	7009:  "afs3-rmtsys",
+	7010:  "ups-onlinet",
+	7011:  "talon-disc",
+	7012:  "talon-engine",
+	7013:  "microtalon-dis",
+	7014:  "microtalon-com",
+	7015:  "talon-webserver",
+	7016:  "spg",
+	7017:  "grasp",
+	7018:  "fisa-svc",
+	7019:  "doceri-ctl",
+	7020:  "dpserve",
+	7021:  "dpserveadmin",
+	7022:  "ctdp",
+	7023:  "ct2nmcs",
+	7024:  "vmsvc",
+	7025:  "vmsvc-2",
+	7030:  "op-probe",
+	7031:  "iposplanet",
+	7070:  "arcp",
+	7071:  "iwg1",
+	7073:  "martalk",
+	7080:  "empowerid",
+	7099:  "lazy-ptop",
+	7100:  "font-service",
+	7101:  "elcn",
+	7117:  "rothaga",
+	7121:  "virprot-lm",
+	7128:  "scenidm",
+	7129:  "scenccs",
+	7161:  "cabsm-comm",
+	7162:  "caistoragemgr",
+	7163:  "cacsambroker",
+	7164:  "fsr",
+	7165:  "doc-server",
+	7166:  "aruba-server",
+	7167:  "casrmagent",
+	7168:  "cnckadserver",
+	7169:  "ccag-pib",
+	7170:  "nsrp",
+	7171:  "drm-production",
+	7172:  "metalbend",
+	7173:  "zsecure",
+	7174:  "clutild",
+	7200:  "fodms",
+	7201:  "dlip",
+	7202:  "pon-ictp",
+	7215:  "PS-Server",
+	7216:  "PS-Capture-Pro",
+	7227:  "ramp",
+	7228:  "citrixupp",
+	7229:  "citrixuppg",
+	7236:  "display",
+	7237:  "pads",
+	7244:  "frc-hicp",
+	7262:  "cnap",
+	7272:  "watchme-7272",
+	7273:  "oma-rlp",
+	7274:  "oma-rlp-s",
+	7275:  "oma-ulp",
+	7276:  "oma-ilp",
+	7277:  "oma-ilp-s",
+	7278:  "oma-dcdocbs",
+	7279:  "ctxlic",
+	7280:  "itactionserver1",
+	7281:  "itactionserver2",
+	7282:  "mzca-action",
+	7283:  "genstat",
+	7365:  "lcm-server",
+	7391:  "mindfilesys",
+	7392:  "mrssrendezvous",
+	7393:  "nfoldman",
+	7394:  "fse",
+	7395:  "winqedit",
+	7397:  "hexarc",
+	7400:  "rtps-discovery",
+	7401:  "rtps-dd-ut",
+	7402:  "rtps-dd-mt",
+	7410:  "ionixnetmon",
+	7411:  "daqstream",
+	7421:  "mtportmon",
+	7426:  "pmdmgr",
+	7427:  "oveadmgr",
+	7428:  "ovladmgr",
+	7429:  "opi-sock",
+	7430:  "xmpv7",
+	7431:  "pmd",
+	7437:  "faximum",
+	7443:  "oracleas-https",
+	7471:  "sttunnel",
+	7473:  "rise",
+	7474:  "neo4j",
+	7478:  "openit",
+	7491:  "telops-lmd",
+	7500:  "silhouette",
+	7501:  "ovbus",
+	7508:  "adcp",
+	7509:  "acplt",
+	7510:  "ovhpas",
+	7511:  "pafec-lm",
+	7542:  "saratoga",
+	7543:  "atul",
+	7544:  "nta-ds",
+	7545:  "nta-us",
+	7546:  "cfs",
+	7547:  "cwmp",
+	7548:  "tidp",
+	7549:  "nls-tl",
+	7551:  "controlone-con",
+	7560:  "sncp",
+	7563:  "cfw",
+	7566:  "vsi-omega",
+	7569:  "dell-eql-asm",
+	7570:  "aries-kfinder",
+	7574:  "coherence",
+	7588:  "sun-lm",
+	7606:  "mipi-debug",
+	7624:  "indi",
+	7626:  "simco",
+	7627:  "soap-http",
+	7628:  "zen-pawn",
+	7629:  "xdas",
+	7630:  "hawk",
+	7631:  "tesla-sys-msg",
+	7633:  "pmdfmgt",
+	7648:  "cuseeme",
+	7672:  "imqstomp",
+	7673:  "imqstomps",
+	7674:  "imqtunnels",
+	7675:  "imqtunnel",
+	7676:  "imqbrokerd",
+	7677:  "sun-user-https",
+	7680:  "pando-pub",
+	7683:  "dmt",
+	7687:  "bolt",
+	7689:  "collaber",
+	7697:  "klio",
+	7700:  "em7-secom",
+	7707:  "sync-em7",
+	7708:  "scinet",
+	7720:  "medimageportal",
+	7724:  "nsdeepfreezectl",
+	7725:  "nitrogen",
+	7726:  "freezexservice",
+	7727:  "trident-data",
+	7728:  "osvr",
+	7734:  "smip",
+	7738:  "aiagent",
+	7741:  "scriptview",
+	7742:  "msss",
+	7743:  "sstp-1",
+	7744:  "raqmon-pdu",
+	7747:  "prgp",
+	7775:  "inetfs",
+	7777:  "cbt",
+	7778:  "interwise",
+	7779:  "vstat",
+	7781:  "accu-lmgr",
+	7786:  "minivend",
+	7787:  "popup-reminders",
+	7789:  "office-tools",
+	7794:  "q3ade",
+	7797:  "pnet-conn",
+	7798:  "pnet-enc",
+	7799:  "altbsdp",
+	7800:  "asr",
+	7801:  "ssp-client",
+	7810:  "rbt-wanopt",
+	7845:  "apc-7845",
+	7846:  "apc-7846",
+	7847:  "csoauth",
+	7869:  "mobileanalyzer",
+	7870:  "rbt-smc",
+	7871:  "mdm",
+	7878:  "owms",
+	7880:  "pss",
+	7887:  "ubroker",
+	7900:  "mevent",
+	7901:  "tnos-sp",
+	7902:  "tnos-dp",
+	7903:  "tnos-dps",
+	7913:  "qo-secure",
+	7932:  "t2-drm",
+	7933:  "t2-brm",
+	7962:  "generalsync",
+	7967:  "supercell",
+	7979:  "micromuse-ncps",
+	7980:  "quest-vista",
+	7981:  "sossd-collect",
+	7982:  "sossd-agent",
+	7997:  "pushns",
+	7999:  "irdmi2",
+	8000:  "irdmi",
+	8001:  "vcom-tunnel",
+	8002:  "teradataordbms",
+	8003:  "mcreport",
+	8005:  "mxi",
+	8006:  "wpl-analytics",
+	8007:  "warppipe",
+	8008:  "http-alt",
+	8019:  "qbdb",
+	8020:  "intu-ec-svcdisc",
+	8021:  "intu-ec-client",
+	8022:  "oa-system",
+	8025:  "ca-audit-da",
+	8026:  "ca-audit-ds",
+	8032:  "pro-ed",
+	8033:  "mindprint",
+	8034:  "vantronix-mgmt",
+	8040:  "ampify",
+	8041:  "enguity-xccetp",
+	8042:  "fs-agent",
+	8043:  "fs-server",
+	8044:  "fs-mgmt",
+	8051:  "rocrail",
+	8052:  "senomix01",
+	8053:  "senomix02",
+	8054:  "senomix03",
+	8055:  "senomix04",
+	8056:  "senomix05",
+	8057:  "senomix06",
+	8058:  "senomix07",
+	8059:  "senomix08",
+	8066:  "toad-bi-appsrvr",
+	8067:  "infi-async",
+	8070:  "ucs-isc",
+	8074:  "gadugadu",
+	8077:  "mles",
+	8080:  "http-alt",
+	8081:  "sunproxyadmin",
+	8082:  "us-cli",
+	8083:  "us-srv",
+	8086:  "d-s-n",
+	8087:  "simplifymedia",
+	8088:  "radan-http",
+	8090:  "opsmessaging",
+	8091:  "jamlink",
+	8097:  "sac",
+	8100:  "xprint-server",
+	8101:  "ldoms-migr",
+	8102:  "kz-migr",
+	8115:  "mtl8000-matrix",
+	8116:  "cp-cluster",
+	8117:  "purityrpc",
+	8118:  "privoxy",
+	8121:  "apollo-data",
+	8122:  "apollo-admin",
+	8128:  "paycash-online",
+	8129:  "paycash-wbp",
+	8130:  "indigo-vrmi",
+	8131:  "indigo-vbcp",
+	8132:  "dbabble",
+	8140:  "puppet",
+	8148:  "isdd",
+	8153:  "quantastor",
+	8160:  "patrol",
+	8161:  "patrol-snmp",
+	8162:  "lpar2rrd",
+	8181:  "intermapper",
+	8182:  "vmware-fdm",
+	8183:  "proremote",
+	8184:  "itach",
+	8190:  "gcp-rphy",
+	8191:  "limnerpressure",
+	8192:  "spytechphone",
+	8194:  "blp1",
+	8195:  "blp2",
+	8199:  "vvr-data",
+	8200:  "trivnet1",
+	8201:  "trivnet2",
+	8204:  "lm-perfworks",
+	8205:  "lm-instmgr",
+	8206:  "lm-dta",
+	8207:  "lm-sserver",
+	8208:  "lm-webwatcher",
+	8230:  "rexecj",
+	8243:  "synapse-nhttps",
+	8270:  "robot-remote",
+	8276:  "pando-sec",
+	8280:  "synapse-nhttp",
+	8282:  "libelle",
+	8292:  "blp3",
+	8293:  "hiperscan-id",
+	8294:  "blp4",
+	8300:  "tmi",
+	8301:  "amberon",
+	8313:  "hub-open-net",
+	8320:  "tnp-discover",
+	8321:  "tnp",
+	8322:  "garmin-marine",
+	8351:  "server-find",
+	8376:  "cruise-enum",
+	8377:  "cruise-swroute",
+	8378:  "cruise-config",
+	8379:  "cruise-diags",
+	8380:  "cruise-update",
+	8383:  "m2mservices",
+	8400:  "cvd",
+	8401:  "sabarsd",
+	8402:  "abarsd",
+	8403:  "admind",
+	8404:  "svcloud",
+	8405:  "svbackup",
+	8415:  "dlpx-sp",
+	8416:  "espeech",
+	8417:  "espeech-rtp",
+	8423:  "aritts",
+	8442:  "cybro-a-bus",
+	8443:  "pcsync-https",
+	8444:  "pcsync-http",
+	8445:  "copy",
+	8450:  "npmp",
+	8457:  "nexentamv",
+	8470:  "cisco-avp",
+	8471:  "pim-port",
+	8472:  "otv",
+	8473:  "vp2p",
+	8474:  "noteshare",
+	8500:  "fmtp",
+	8501:  "cmtp-mgt",
+	8502:  "ftnmtp",
+	8554:  "rtsp-alt",
+	8555:  "d-fence",
+	8567:  "dof-tunnel",
+	8600:  "asterix",
+	8610:  "canon-mfnp",
+	8611:  "canon-bjnp1",
+	8612:  "canon-bjnp2",
+	8613:  "canon-bjnp3",
+	8614:  "canon-bjnp4",
+	8615:  "imink",
+	8665:  "monetra",
+	8666:  "monetra-admin",
+	8675:  "msi-cps-rm",
+	8686:  "sun-as-jmxrmi",
+	8688:  "openremote-ctrl",
+	8699:  "vnyx",
+	8711:  "nvc",
+	8733:  "ibus",
+	8750:  "dey-keyneg",
+	8763:  "mc-appserver",
+	8764:  "openqueue",
+	8765:  "ultraseek-http",
+	8766:  "amcs",
+	8770:  "dpap",
+	8778:  "uec",
+	8786:  "msgclnt",
+	8787:  "msgsrvr",
+	8793:  "acd-pm",
+	8800:  "sunwebadmin",
+	8804:  "truecm",
+	8873:  "dxspider",
+	8880:  "cddbp-alt",
+	8881:  "galaxy4d",
+	8883:  "secure-mqtt",
+	8888:  "ddi-tcp-1",
+	8889:  "ddi-tcp-2",
+	8890:  "ddi-tcp-3",
+	8891:  "ddi-tcp-4",
+	8892:  "ddi-tcp-5",
+	8893:  "ddi-tcp-6",
+	8894:  "ddi-tcp-7",
+	8899:  "ospf-lite",
+	8900:  "jmb-cds1",
+	8901:  "jmb-cds2",
+	8910:  "manyone-http",
+	8911:  "manyone-xml",
+	8912:  "wcbackup",
+	8913:  "dragonfly",
+	8937:  "twds",
+	8953:  "ub-dns-control",
+	8954:  "cumulus-admin",
+	8980:  "nod-provider",
+	8989:  "sunwebadmins",
+	8990:  "http-wmap",
+	8991:  "https-wmap",
+	8997:  "oracle-ms-ens",
+	8998:  "canto-roboflow",
+	8999:  "bctp",
+	9000:  "cslistener",
+	9001:  "etlservicemgr",
+	9002:  "dynamid",
+	9005:  "golem",
+	9008:  "ogs-server",
+	9009:  "pichat",
+	9010:  "sdr",
+	9020:  "tambora",
+	9021:  "panagolin-ident",
+	9022:  "paragent",
+	9023:  "swa-1",
+	9024:  "swa-2",
+	9025:  "swa-3",
+	9026:  "swa-4",
+	9050:  "versiera",
+	9051:  "fio-cmgmt",
+	9060:  "CardWeb-IO",
+	9080:  "glrpc",
+	9083:  "emc-pp-mgmtsvc",
+	9084:  "aurora",
+	9085:  "ibm-rsyscon",
+	9086:  "net2display",
+	9087:  "classic",
+	9088:  "sqlexec",
+	9089:  "sqlexec-ssl",
+	9090:  "websm",
+	9091:  "xmltec-xmlmail",
+	9092:  "XmlIpcRegSvc",
+	9093:  "copycat",
+	9100:  "hp-pdl-datastr",
+	9101:  "bacula-dir",
+	9102:  "bacula-fd",
+	9103:  "bacula-sd",
+	9104:  "peerwire",
+	9105:  "xadmin",
+	9106:  "astergate",
+	9107:  "astergatefax",
+	9119:  "mxit",
+	9122:  "grcmp",
+	9123:  "grcp",
+	9131:  "dddp",
+	9160:  "apani1",
+	9161:  "apani2",
+	9162:  "apani3",
+	9163:  "apani4",
+	9164:  "apani5",
+	9191:  "sun-as-jpda",
+	9200:  "wap-wsp",
+	9201:  "wap-wsp-wtp",
+	9202:  "wap-wsp-s",
+	9203:  "wap-wsp-wtp-s",
+	9204:  "wap-vcard",
+	9205:  "wap-vcal",
+	9206:  "wap-vcard-s",
+	9207:  "wap-vcal-s",
+	9208:  "rjcdb-vcards",
+	9209:  "almobile-system",
+	9210:  "oma-mlp",
+	9211:  "oma-mlp-s",
+	9212:  "serverviewdbms",
+	9213:  "serverstart",
+	9214:  "ipdcesgbs",
+	9215:  "insis",
+	9216:  "acme",
+	9217:  "fsc-port",
+	9222:  "teamcoherence",
+	9255:  "mon",
+	9278:  "pegasus",
+	9279:  "pegasus-ctl",
+	9280:  "pgps",
+	9281:  "swtp-port1",
+	9282:  "swtp-port2",
+	9283:  "callwaveiam",
+	9284:  "visd",
+	9285:  "n2h2server",
+	9287:  "cumulus",
+	9292:  "armtechdaemon",
+	9293:  "storview",
+	9294:  "armcenterhttp",
+	9295:  "armcenterhttps",
+	9300:  "vrace",
+	9306:  "sphinxql",
+	9312:  "sphinxapi",
+	9318:  "secure-ts",
+	9321:  "guibase",
+	9343:  "mpidcmgr",
+	9344:  "mphlpdmc",
+	9345:  "rancher",
+	9346:  "ctechlicensing",
+	9374:  "fjdmimgr",
+	9380:  "boxp",
+	9387:  "d2dconfig",
+	9388:  "d2ddatatrans",
+	9389:  "adws",
+	9390:  "otp",
+	9396:  "fjinvmgr",
+	9397:  "mpidcagt",
+	9400:  "sec-t4net-srv",
+	9401:  "sec-t4net-clt",
+	9402:  "sec-pc2fax-srv",
+	9418:  "git",
+	9443:  "tungsten-https",
+	9444:  "wso2esb-console",
+	9445:  "mindarray-ca",
+	9450:  "sntlkeyssrvr",
+	9500:  "ismserver",
+	9535:  "mngsuite",
+	9536:  "laes-bf",
+	9555:  "trispen-sra",
+	9592:  "ldgateway",
+	9593:  "cba8",
+	9594:  "msgsys",
+	9595:  "pds",
+	9596:  "mercury-disc",
+	9597:  "pd-admin",
+	9598:  "vscp",
+	9599:  "robix",
+	9600:  "micromuse-ncpw",
+	9612:  "streamcomm-ds",
+	9614:  "iadt-tls",
+	9616:  "erunbook-agent",
+	9617:  "erunbook-server",
+	9618:  "condor",
+	9628:  "odbcpathway",
+	9629:  "uniport",
+	9630:  "peoctlr",
+	9631:  "peocoll",
+	9640:  "pqsflows",
+	9666:  "zoomcp",
+	9667:  "xmms2",
+	9668:  "tec5-sdctp",
+	9694:  "client-wakeup",
+	9695:  "ccnx",
+	9700:  "board-roar",
+	9747:  "l5nas-parchan",
+	9750:  "board-voip",
+	9753:  "rasadv",
+	9762:  "tungsten-http",
+	9800:  "davsrc",
+	9801:  "sstp-2",
+	9802:  "davsrcs",
+	9875:  "sapv1",
+	9876:  "sd",
+	9888:  "cyborg-systems",
+	9889:  "gt-proxy",
+	9898:  "monkeycom",
+	9900:  "iua",
+	9909:  "domaintime",
+	9911:  "sype-transport",
+	9925:  "xybrid-cloud",
+	9950:  "apc-9950",
+	9951:  "apc-9951",
+	9952:  "apc-9952",
+	9953:  "acis",
+	9954:  "hinp",
+	9955:  "alljoyn-stm",
+	9966:  "odnsp",
+	9978:  "xybrid-rt",
+	9979:  "visweather",
+	9981:  "pumpkindb",
+	9987:  "dsm-scm-target",
+	9988:  "nsesrvr",
+	9990:  "osm-appsrvr",
+	9991:  "osm-oev",
+	9992:  "palace-1",
+	9993:  "palace-2",
+	9994:  "palace-3",
+	9995:  "palace-4",
+	9996:  "palace-5",
+	9997:  "palace-6",
+	9998:  "distinct32",
+	9999:  "distinct",
+	10000: "ndmp",
+	10001: "scp-config",
+	10002: "documentum",
+	10003: "documentum-s",
+	10004: "emcrmirccd",
+	10005: "emcrmird",
+	10006: "netapp-sync",
+	10007: "mvs-capacity",
+	10008: "octopus",
+	10009: "swdtp-sv",
+	10010: "rxapi",
+	10020: "abb-hw",
+	10050: "zabbix-agent",
+	10051: "zabbix-trapper",
+	10055: "qptlmd",
+	10080: "amanda",
+	10081: "famdc",
+	10100: "itap-ddtp",
+	10101: "ezmeeting-2",
+	10102: "ezproxy-2",
+	10103: "ezrelay",
+	10104: "swdtp",
+	10107: "bctp-server",
+	10110: "nmea-0183",
+	10113: "netiq-endpoint",
+	10114: "netiq-qcheck",
+	10115: "netiq-endpt",
+	10116: "netiq-voipa",
+	10117: "iqrm",
+	10125: "cimple",
+	10128: "bmc-perf-sd",
+	10129: "bmc-gms",
+	10160: "qb-db-server",
+	10161: "snmptls",
+	10162: "snmptls-trap",
+	10200: "trisoap",
+	10201: "rsms",
+	10252: "apollo-relay",
+	10260: "axis-wimp-port",
+	10261: "tile-ml",
+	10288: "blocks",
+	10321: "cosir",
+	10540: "MOS-lower",
+	10541: "MOS-upper",
+	10542: "MOS-aux",
+	10543: "MOS-soap",
+	10544: "MOS-soap-opt",
+	10548: "serverdocs",
+	10631: "printopia",
+	10800: "gap",
+	10805: "lpdg",
+	10809: "nbd",
+	10860: "helix",
+	10880: "bveapi",
+	10933: "octopustentacle",
+	10990: "rmiaux",
+	11000: "irisa",
+	11001: "metasys",
+	11095: "weave",
+	11103: "origo-sync",
+	11104: "netapp-icmgmt",
+	11105: "netapp-icdata",
+	11106: "sgi-lk",
+	11109: "sgi-dmfmgr",
+	11110: "sgi-soap",
+	11111: "vce",
+	11112: "dicom",
+	11161: "suncacao-snmp",
+	11162: "suncacao-jmxmp",
+	11163: "suncacao-rmi",
+	11164: "suncacao-csa",
+	11165: "suncacao-websvc",
+	11172: "oemcacao-jmxmp",
+	11173: "t5-straton",
+	11174: "oemcacao-rmi",
+	11175: "oemcacao-websvc",
+	11201: "smsqp",
+	11202: "dcsl-backup",
+	11208: "wifree",
+	11211: "memcache",
+	11319: "imip",
+	11320: "imip-channels",
+	11321: "arena-server",
+	11367: "atm-uhas",
+	11371: "hkp",
+	11489: "asgcypresstcps",
+	11600: "tempest-port",
+	11623: "emc-xsw-dconfig",
+	11720: "h323callsigalt",
+	11723: "emc-xsw-dcache",
+	11751: "intrepid-ssl",
+	11796: "lanschool",
+	11876: "xoraya",
+	11967: "sysinfo-sp",
+	12000: "entextxid",
+	12001: "entextnetwk",
+	12002: "entexthigh",
+	12003: "entextmed",
+	12004: "entextlow",
+	12005: "dbisamserver1",
+	12006: "dbisamserver2",
+	12007: "accuracer",
+	12008: "accuracer-dbms",
+	12010: "edbsrvr",
+	12012: "vipera",
+	12013: "vipera-ssl",
+	12109: "rets-ssl",
+	12121: "nupaper-ss",
+	12168: "cawas",
+	12172: "hivep",
+	12300: "linogridengine",
+	12302: "rads",
+	12321: "warehouse-sss",
+	12322: "warehouse",
+	12345: "italk",
+	12753: "tsaf",
+	12865: "netperf",
+	13160: "i-zipqd",
+	13216: "bcslogc",
+	13217: "rs-pias",
+	13218: "emc-vcas-tcp",
+	13223: "powwow-client",
+	13224: "powwow-server",
+	13400: "doip-data",
+	13720: "bprd",
+	13721: "bpdbm",
+	13722: "bpjava-msvc",
+	13724: "vnetd",
+	13782: "bpcd",
+	13783: "vopied",
+	13785: "nbdb",
+	13786: "nomdb",
+	13818: "dsmcc-config",
+	13819: "dsmcc-session",
+	13820: "dsmcc-passthru",
+	13821: "dsmcc-download",
+	13822: "dsmcc-ccp",
+	13823: "bmdss",
+	13894: "ucontrol",
+	13929: "dta-systems",
+	13930: "medevolve",
+	14000: "scotty-ft",
+	14001: "sua",
+	14033: "sage-best-com1",
+	14034: "sage-best-com2",
+	14141: "vcs-app",
+	14142: "icpp",
+	14143: "icpps",
+	14145: "gcm-app",
+	14149: "vrts-tdd",
+	14150: "vcscmd",
+	14154: "vad",
+	14250: "cps",
+	14414: "ca-web-update",
+	14500: "xpra",
+	14936: "hde-lcesrvr-1",
+	14937: "hde-lcesrvr-2",
+	15000: "hydap",
+	15002: "onep-tls",
+	15345: "xpilot",
+	15363: "3link",
+	15555: "cisco-snat",
+	15660: "bex-xr",
+	15740: "ptp",
+	15999: "programmar",
+	16000: "fmsas",
+	16001: "fmsascon",
+	16002: "gsms",
+	16020: "jwpc",
+	16021: "jwpc-bin",
+	16161: "sun-sea-port",
+	16162: "solaris-audit",
+	16309: "etb4j",
+	16310: "pduncs",
+	16311: "pdefmns",
+	16360: "netserialext1",
+	16361: "netserialext2",
+	16367: "netserialext3",
+	16368: "netserialext4",
+	16384: "connected",
+	16385: "rdgs",
+	16619: "xoms",
+	16665: "axon-tunnel",
+	16789: "cadsisvr",
+	16900: "newbay-snc-mc",
+	16950: "sgcip",
+	16991: "intel-rci-mp",
+	16992: "amt-soap-http",
+	16993: "amt-soap-https",
+	16994: "amt-redir-tcp",
+	16995: "amt-redir-tls",
+	17007: "isode-dua",
+	17184: "vestasdlp",
+	17185: "soundsvirtual",
+	17219: "chipper",
+	17220: "avtp",
+	17221: "avdecc",
+	17223: "isa100-gci",
+	17225: "trdp-md",
+	17234: "integrius-stp",
+	17235: "ssh-mgmt",
+	17500: "db-lsp",
+	17555: "ailith",
+	17729: "ea",
+	17754: "zep",
+	17755: "zigbee-ip",
+	17756: "zigbee-ips",
+	17777: "sw-orion",
+	18000: "biimenu",
+	18104: "radpdf",
+	18136: "racf",
+	18181: "opsec-cvp",
+	18182: "opsec-ufp",
+	18183: "opsec-sam",
+	18184: "opsec-lea",
+	18185: "opsec-omi",
+	18186: "ohsc",
+	18187: "opsec-ela",
+	18241: "checkpoint-rtm",
+	18242: "iclid",
+	18243: "clusterxl",
+	18262: "gv-pf",
+	18463: "ac-cluster",
+	18634: "rds-ib",
+	18635: "rds-ip",
+	18668: "vdmmesh",
+	18769: "ique",
+	18881: "infotos",
+	18888: "apc-necmp",
+	19000: "igrid",
+	19007: "scintilla",
+	19020: "j-link",
+	19191: "opsec-uaa",
+	19194: "ua-secureagent",
+	19220: "cora",
+	19283: "keysrvr",
+	19315: "keyshadow",
+	19398: "mtrgtrans",
+	19410: "hp-sco",
+	19411: "hp-sca",
+	19412: "hp-sessmon",
+	19539: "fxuptp",
+	19540: "sxuptp",
+	19541: "jcp",
+	19998: "iec-104-sec",
+	19999: "dnp-sec",
+	20000: "dnp",
+	20001: "microsan",
+	20002: "commtact-http",
+	20003: "commtact-https",
+	20005: "openwebnet",
+	20013: "ss-idi",
+	20014: "opendeploy",
+	20034: "nburn-id",
+	20046: "tmophl7mts",
+	20048: "mountd",
+	20049: "nfsrdma",
+	20057: "avesterra",
+	20167: "tolfab",
+	20202: "ipdtp-port",
+	20222: "ipulse-ics",
+	20480: "emwavemsg",
+	20670: "track",
+	20999: "athand-mmp",
+	21000: "irtrans",
+	21010: "notezilla-lan",
+	21221: "aigairserver",
+	21553: "rdm-tfs",
+	21554: "dfserver",
+	21590: "vofr-gateway",
+	21800: "tvpm",
+	21845: "webphone",
+	21846: "netspeak-is",
+	21847: "netspeak-cs",
+	21848: "netspeak-acd",
+	21849: "netspeak-cps",
+	22000: "snapenetio",
+	22001: "optocontrol",
+	22002: "optohost002",
+	22003: "optohost003",
+	22004: "optohost004",
+	22005: "optohost004",
+	22125: "dcap",
+	22128: "gsidcap",
+	22222: "easyengine",
+	22273: "wnn6",
+	22305: "cis",
+	22335: "shrewd-control",
+	22343: "cis-secure",
+	22347: "wibukey",
+	22350: "codemeter",
+	22351: "codemeter-cmwan",
+	22537: "caldsoft-backup",
+	22555: "vocaltec-wconf",
+	22763: "talikaserver",
+	22800: "aws-brf",
+	22951: "brf-gw",
+	23000: "inovaport1",
+	23001: "inovaport2",
+	23002: "inovaport3",
+	23003: "inovaport4",
+	23004: "inovaport5",
+	23005: "inovaport6",
+	23053: "gntp",
+	23294: "5afe-dir",
+	23333: "elxmgmt",
+	23400: "novar-dbase",
+	23401: "novar-alarm",
+	23402: "novar-global",
+	23456: "aequus",
+	23457: "aequus-alt",
+	23546: "areaguard-neo",
+	24000: "med-ltp",
+	24001: "med-fsp-rx",
+	24002: "med-fsp-tx",
+	24003: "med-supp",
+	24004: "med-ovw",
+	24005: "med-ci",
+	24006: "med-net-svc",
+	24242: "filesphere",
+	24249: "vista-4gl",
+	24321: "ild",
+	24386: "intel-rci",
+	24465: "tonidods",
+	24554: "binkp",
+	24577: "bilobit",
+	24666: "sdtvwcam",
+	24676: "canditv",
+	24677: "flashfiler",
+	24678: "proactivate",
+	24680: "tcc-http",
+	24754: "cslg",
+	24922: "find",
+	25000: "icl-twobase1",
+	25001: "icl-twobase2",
+	25002: "icl-twobase3",
+	25003: "icl-twobase4",
+	25004: "icl-twobase5",
+	25005: "icl-twobase6",
+	25006: "icl-twobase7",
+	25007: "icl-twobase8",
+	25008: "icl-twobase9",
+	25009: "icl-twobase10",
+	25576: "sauterdongle",
+	25604: "idtp",
+	25793: "vocaltec-hos",
+	25900: "tasp-net",
+	25901: "niobserver",
+	25902: "nilinkanalyst",
+	25903: "niprobe",
+	26000: "quake",
+	26133: "scscp",
+	26208: "wnn6-ds",
+	26257: "cockroach",
+	26260: "ezproxy",
+	26261: "ezmeeting",
+	26262: "k3software-svr",
+	26263: "k3software-cli",
+	26486: "exoline-tcp",
+	26487: "exoconfig",
+	26489: "exonet",
+	27345: "imagepump",
+	27442: "jesmsjc",
+	27504: "kopek-httphead",
+	27782: "ars-vista",
+	27876: "astrolink",
+	27999: "tw-auth-key",
+	28000: "nxlmd",
+	28001: "pqsp",
+	28200: "voxelstorm",
+	28240: "siemensgsm",
+	28589: "bosswave",
+	29167: "otmp",
+	29999: "bingbang",
+	30000: "ndmps",
+	30001: "pago-services1",
+	30002: "pago-services2",
+	30003: "amicon-fpsu-ra",
+	30100: "rwp",
+	30260: "kingdomsonline",
+	30400: "gs-realtime",
+	30999: "ovobs",
+	31016: "ka-sddp",
+	31020: "autotrac-acp",
+	31400: "pace-licensed",
+	31416: "xqosd",
+	31457: "tetrinet",
+	31620: "lm-mon",
+	31685: "dsx-monitor",
+	31765: "gamesmith-port",
+	31948: "iceedcp-tx",
+	31949: "iceedcp-rx",
+	32034: "iracinghelper",
+	32249: "t1distproc60",
+	32400: "plex",
+	32483: "apm-link",
+	32635: "sec-ntb-clnt",
+	32636: "DMExpress",
+	32767: "filenet-powsrm",
+	32768: "filenet-tms",
+	32769: "filenet-rpc",
+	32770: "filenet-nch",
+	32771: "filenet-rmi",
+	32772: "filenet-pa",
+	32773: "filenet-cm",
+	32774: "filenet-re",
+	32775: "filenet-pch",
+	32776: "filenet-peior",
+	32777: "filenet-obrok",
+	32801: "mlsn",
+	32811: "retp",
+	32896: "idmgratm",
+	33060: "mysqlx",
+	33123: "aurora-balaena",
+	33331: "diamondport",
+	33333: "dgi-serv",
+	33334: "speedtrace",
+	33434: "traceroute",
+	33656: "snip-slave",
+	34249: "turbonote-2",
+	34378: "p-net-local",
+	34379: "p-net-remote",
+	34567: "dhanalakshmi",
+	34962: "profinet-rt",
+	34963: "profinet-rtm",
+	34964: "profinet-cm",
+	34980: "ethercat",
+	35000: "heathview",
+	35001: "rt-viewer",
+	35002: "rt-sound",
+	35003: "rt-devicemapper",
+	35004: "rt-classmanager",
+	35005: "rt-labtracker",
+	35006: "rt-helper",
+	35100: "axio-disc",
+	35354: "kitim",
+	35355: "altova-lm",
+	35356: "guttersnex",
+	35357: "openstack-id",
+	36001: "allpeers",
+	36524: "febooti-aw",
+	36602: "observium-agent",
+	36700: "mapx",
+	36865: "kastenxpipe",
+	37475: "neckar",
+	37483: "gdrive-sync",
+	37601: "eftp",
+	37654: "unisys-eportal",
+	38000: "ivs-database",
+	38001: "ivs-insertion",
+	38002: "cresco-control",
+	38201: "galaxy7-data",
+	38202: "fairview",
+	38203: "agpolicy",
+	38800: "sruth",
+	38865: "secrmmsafecopya",
+	39681: "turbonote-1",
+	40000: "safetynetp",
+	40404: "sptx",
+	40841: "cscp",
+	40842: "csccredir",
+	40843: "csccfirewall",
+	41111: "fs-qos",
+	41121: "tentacle",
+	41230: "z-wave-s",
+	41794: "crestron-cip",
+	41795: "crestron-ctp",
+	41796: "crestron-cips",
+	41797: "crestron-ctps",
+	42508: "candp",
+	42509: "candrp",
+	42510: "caerpc",
+	43000: "recvr-rc",
+	43188: "reachout",
+	43189: "ndm-agent-port",
+	43190: "ip-provision",
+	43191: "noit-transport",
+	43210: "shaperai",
+	43439: "eq3-update",
+	43440: "ew-mgmt",
+	43441: "ciscocsdb",
+	44123: "z-wave-tunnel",
+	44321: "pmcd",
+	44322: "pmcdproxy",
+	44323: "pmwebapi",
+	44444: "cognex-dataman",
+	44553: "rbr-debug",
+	44818: "EtherNet-IP-2",
+	44900: "m3da",
+	45000: "asmp",
+	45001: "asmps",
+	45002: "rs-status",
+	45045: "synctest",
+	45054: "invision-ag",
+	45514: "cloudcheck",
+	45678: "eba",
+	45824: "dai-shell",
+	45825: "qdb2service",
+	45966: "ssr-servermgr",
+	46336: "inedo",
+	46998: "spremotetablet",
+	46999: "mediabox",
+	47000: "mbus",
+	47001: "winrm",
+	47557: "dbbrowse",
+	47624: "directplaysrvr",
+	47806: "ap",
+	47808: "bacnet",
+	48000: "nimcontroller",
+	48001: "nimspooler",
+	48002: "nimhub",
+	48003: "nimgtw",
+	48004: "nimbusdb",
+	48005: "nimbusdbctrl",
+	48049: "3gpp-cbsp",
+	48050: "weandsf",
+	48128: "isnetserv",
+	48129: "blp5",
+	48556: "com-bardac-dw",
+	48619: "iqobject",
+	48653: "robotraconteur",
+	49000: "matahari",
+	49001: "nusrp",
+}
+var udpPortNames = map[UDPPort]string{
+	1:     "tcpmux",
+	2:     "compressnet",
+	3:     "compressnet",
+	5:     "rje",
+	7:     "echo",
+	9:     "discard",
+	11:    "systat",
+	13:    "daytime",
+	17:    "qotd",
+	18:    "msp",
+	19:    "chargen",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	23:    "telnet",
+	25:    "smtp",
+	27:    "nsw-fe",
+	29:    "msg-icp",
+	31:    "msg-auth",
+	33:    "dsp",
+	37:    "time",
+	38:    "rap",
+	39:    "rlp",
+	41:    "graphics",
+	42:    "name",
+	43:    "nicname",
+	44:    "mpm-flags",
+	45:    "mpm",
+	46:    "mpm-snd",
+	48:    "auditd",
+	49:    "tacacs",
+	50:    "re-mail-ck",
+	52:    "xns-time",
+	53:    "domain",
+	54:    "xns-ch",
+	55:    "isi-gl",
+	56:    "xns-auth",
+	58:    "xns-mail",
+	62:    "acas",
+	63:    "whoispp",
+	64:    "covia",
+	65:    "tacacs-ds",
+	66:    "sql-net",
+	67:    "bootps",
+	68:    "bootpc",
+	69:    "tftp",
+	70:    "gopher",
+	71:    "netrjs-1",
+	72:    "netrjs-2",
+	73:    "netrjs-3",
+	74:    "netrjs-4",
+	76:    "deos",
+	78:    "vettcp",
+	79:    "finger",
+	80:    "http",
+	82:    "xfer",
+	83:    "mit-ml-dev",
+	84:    "ctf",
+	85:    "mit-ml-dev",
+	86:    "mfcobol",
+	88:    "kerberos",
+	89:    "su-mit-tg",
+	90:    "dnsix",
+	91:    "mit-dov",
+	92:    "npp",
+	93:    "dcp",
+	94:    "objcall",
+	95:    "supdup",
+	96:    "dixie",
+	97:    "swift-rvf",
+	98:    "tacnews",
+	99:    "metagram",
+	101:   "hostname",
+	102:   "iso-tsap",
+	103:   "gppitnp",
+	104:   "acr-nema",
+	105:   "cso",
+	106:   "3com-tsmux",
+	107:   "rtelnet",
+	108:   "snagas",
+	109:   "pop2",
+	110:   "pop3",
+	111:   "sunrpc",
+	112:   "mcidas",
+	113:   "auth",
+	115:   "sftp",
+	116:   "ansanotify",
+	117:   "uucp-path",
+	118:   "sqlserv",
+	119:   "nntp",
+	120:   "cfdptkt",
+	121:   "erpc",
+	122:   "smakynet",
+	123:   "ntp",
+	124:   "ansatrader",
+	125:   "locus-map",
+	126:   "nxedit",
+	127:   "locus-con",
+	128:   "gss-xlicen",
+	129:   "pwdgen",
+	130:   "cisco-fna",
+	131:   "cisco-tna",
+	132:   "cisco-sys",
+	133:   "statsrv",
+	134:   "ingres-net",
+	135:   "epmap",
+	136:   "profile",
+	137:   "netbios-ns",
+	138:   "netbios-dgm",
+	139:   "netbios-ssn",
+	140:   "emfis-data",
+	141:   "emfis-cntl",
+	142:   "bl-idm",
+	143:   "imap",
+	144:   "uma",
+	145:   "uaac",
+	146:   "iso-tp0",
+	147:   "iso-ip",
+	148:   "jargon",
+	149:   "aed-512",
+	150:   "sql-net",
+	151:   "hems",
+	152:   "bftp",
+	153:   "sgmp",
+	154:   "netsc-prod",
+	155:   "netsc-dev",
+	156:   "sqlsrv",
+	157:   "knet-cmp",
+	158:   "pcmail-srv",
+	159:   "nss-routing",
+	160:   "sgmp-traps",
+	161:   "snmp",
+	162:   "snmptrap",
+	163:   "cmip-man",
+	164:   "cmip-agent",
+	165:   "xns-courier",
+	166:   "s-net",
+	167:   "namp",
+	168:   "rsvd",
+	169:   "send",
+	170:   "print-srv",
+	171:   "multiplex",
+	172:   "cl-1",
+	173:   "xyplex-mux",
+	174:   "mailq",
+	175:   "vmnet",
+	176:   "genrad-mux",
+	177:   "xdmcp",
+	178:   "nextstep",
+	179:   "bgp",
+	180:   "ris",
+	181:   "unify",
+	182:   "audit",
+	183:   "ocbinder",
+	184:   "ocserver",
+	185:   "remote-kis",
+	186:   "kis",
+	187:   "aci",
+	188:   "mumps",
+	189:   "qft",
+	190:   "gacp",
+	191:   "prospero",
+	192:   "osu-nms",
+	193:   "srmp",
+	194:   "irc",
+	195:   "dn6-nlm-aud",
+	196:   "dn6-smm-red",
+	197:   "dls",
+	198:   "dls-mon",
+	199:   "smux",
+	200:   "src",
+	201:   "at-rtmp",
+	202:   "at-nbp",
+	203:   "at-3",
+	204:   "at-echo",
+	205:   "at-5",
+	206:   "at-zis",
+	207:   "at-7",
+	208:   "at-8",
+	209:   "qmtp",
+	210:   "z39-50",
+	211:   "914c-g",
+	212:   "anet",
+	213:   "ipx",
+	214:   "vmpwscs",
+	215:   "softpc",
+	216:   "CAIlic",
+	217:   "dbase",
+	218:   "mpp",
+	219:   "uarps",
+	220:   "imap3",
+	221:   "fln-spx",
+	222:   "rsh-spx",
+	223:   "cdc",
+	224:   "masqdialer",
+	242:   "direct",
+	243:   "sur-meas",
+	244:   "inbusiness",
+	245:   "link",
+	246:   "dsp3270",
+	247:   "subntbcst-tftp",
+	248:   "bhfhs",
+	256:   "rap",
+	257:   "set",
+	259:   "esro-gen",
+	260:   "openport",
+	261:   "nsiiops",
+	262:   "arcisdms",
+	263:   "hdap",
+	264:   "bgmp",
+	265:   "x-bone-ctl",
+	266:   "sst",
+	267:   "td-service",
+	268:   "td-replica",
+	269:   "manet",
+	270:   "gist",
+	280:   "http-mgmt",
+	281:   "personal-link",
+	282:   "cableport-ax",
+	283:   "rescap",
+	284:   "corerjd",
+	286:   "fxp",
+	287:   "k-block",
+	308:   "novastorbakcup",
+	309:   "entrusttime",
+	310:   "bhmds",
+	311:   "asip-webadmin",
+	312:   "vslmp",
+	313:   "magenta-logic",
+	314:   "opalis-robot",
+	315:   "dpsi",
+	316:   "decauth",
+	317:   "zannet",
+	318:   "pkix-timestamp",
+	319:   "ptp-event",
+	320:   "ptp-general",
+	321:   "pip",
+	322:   "rtsps",
+	333:   "texar",
+	344:   "pdap",
+	345:   "pawserv",
+	346:   "zserv",
+	347:   "fatserv",
+	348:   "csi-sgwp",
+	349:   "mftp",
+	350:   "matip-type-a",
+	351:   "matip-type-b",
+	352:   "dtag-ste-sb",
+	353:   "ndsauth",
+	354:   "bh611",
+	355:   "datex-asn",
+	356:   "cloanto-net-1",
+	357:   "bhevent",
+	358:   "shrinkwrap",
+	359:   "nsrmp",
+	360:   "scoi2odialog",
+	361:   "semantix",
+	362:   "srssend",
+	363:   "rsvp-tunnel",
+	364:   "aurora-cmgr",
+	365:   "dtk",
+	366:   "odmr",
+	367:   "mortgageware",
+	368:   "qbikgdp",
+	369:   "rpc2portmap",
+	370:   "codaauth2",
+	371:   "clearcase",
+	372:   "ulistproc",
+	373:   "legent-1",
+	374:   "legent-2",
+	375:   "hassle",
+	376:   "nip",
+	377:   "tnETOS",
+	378:   "dsETOS",
+	379:   "is99c",
+	380:   "is99s",
+	381:   "hp-collector",
+	382:   "hp-managed-node",
+	383:   "hp-alarm-mgr",
+	384:   "arns",
+	385:   "ibm-app",
+	386:   "asa",
+	387:   "aurp",
+	388:   "unidata-ldm",
+	389:   "ldap",
+	390:   "uis",
+	391:   "synotics-relay",
+	392:   "synotics-broker",
+	393:   "meta5",
+	394:   "embl-ndt",
+	395:   "netcp",
+	396:   "netware-ip",
+	397:   "mptn",
+	398:   "kryptolan",
+	399:   "iso-tsap-c2",
+	400:   "osb-sd",
+	401:   "ups",
+	402:   "genie",
+	403:   "decap",
+	404:   "nced",
+	405:   "ncld",
+	406:   "imsp",
+	407:   "timbuktu",
+	408:   "prm-sm",
+	409:   "prm-nm",
+	410:   "decladebug",
+	411:   "rmt",
+	412:   "synoptics-trap",
+	413:   "smsp",
+	414:   "infoseek",
+	415:   "bnet",
+	416:   "silverplatter",
+	417:   "onmux",
+	418:   "hyper-g",
+	419:   "ariel1",
+	420:   "smpte",
+	421:   "ariel2",
+	422:   "ariel3",
+	423:   "opc-job-start",
+	424:   "opc-job-track",
+	425:   "icad-el",
+	426:   "smartsdp",
+	427:   "svrloc",
+	428:   "ocs-cmu",
+	429:   "ocs-amu",
+	430:   "utmpsd",
+	431:   "utmpcd",
+	432:   "iasd",
+	433:   "nnsp",
+	434:   "mobileip-agent",
+	435:   "mobilip-mn",
+	436:   "dna-cml",
+	437:   "comscm",
+	438:   "dsfgw",
+	439:   "dasp",
+	440:   "sgcp",
+	441:   "decvms-sysmgt",
+	442:   "cvc-hostd",
+	443:   "https",
+	444:   "snpp",
+	445:   "microsoft-ds",
+	446:   "ddm-rdb",
+	447:   "ddm-dfm",
+	448:   "ddm-ssl",
+	449:   "as-servermap",
+	450:   "tserver",
+	451:   "sfs-smp-net",
+	452:   "sfs-config",
+	453:   "creativeserver",
+	454:   "contentserver",
+	455:   "creativepartnr",
+	456:   "macon-udp",
+	457:   "scohelp",
+	458:   "appleqtc",
+	459:   "ampr-rcmd",
+	460:   "skronk",
+	461:   "datasurfsrv",
+	462:   "datasurfsrvsec",
+	463:   "alpes",
+	464:   "kpasswd",
+	465:   "igmpv3lite",
+	466:   "digital-vrc",
+	467:   "mylex-mapd",
+	468:   "photuris",
+	469:   "rcp",
+	470:   "scx-proxy",
+	471:   "mondex",
+	472:   "ljk-login",
+	473:   "hybrid-pop",
+	474:   "tn-tl-w2",
+	475:   "tcpnethaspsrv",
+	476:   "tn-tl-fd1",
+	477:   "ss7ns",
+	478:   "spsc",
+	479:   "iafserver",
+	480:   "iafdbase",
+	481:   "ph",
+	482:   "bgs-nsi",
+	483:   "ulpnet",
+	484:   "integra-sme",
+	485:   "powerburst",
+	486:   "avian",
+	487:   "saft",
+	488:   "gss-http",
+	489:   "nest-protocol",
+	490:   "micom-pfs",
+	491:   "go-login",
+	492:   "ticf-1",
+	493:   "ticf-2",
+	494:   "pov-ray",
+	495:   "intecourier",
+	496:   "pim-rp-disc",
+	497:   "retrospect",
+	498:   "siam",
+	499:   "iso-ill",
+	500:   "isakmp",
+	501:   "stmf",
+	502:   "mbap",
+	503:   "intrinsa",
+	504:   "citadel",
+	505:   "mailbox-lm",
+	506:   "ohimsrv",
+	507:   "crs",
+	508:   "xvttp",
+	509:   "snare",
+	510:   "fcp",
+	511:   "passgo",
+	512:   "comsat",
+	513:   "who",
+	514:   "syslog",
+	515:   "printer",
+	516:   "videotex",
+	517:   "talk",
+	518:   "ntalk",
+	519:   "utime",
+	520:   "router",
+	521:   "ripng",
+	522:   "ulp",
+	523:   "ibm-db2",
+	524:   "ncp",
+	525:   "timed",
+	526:   "tempo",
+	527:   "stx",
+	528:   "custix",
+	529:   "irc-serv",
+	530:   "courier",
+	531:   "conference",
+	532:   "netnews",
+	533:   "netwall",
+	534:   "windream",
+	535:   "iiop",
+	536:   "opalis-rdv",
+	537:   "nmsp",
+	538:   "gdomap",
+	539:   "apertus-ldp",
+	540:   "uucp",
+	541:   "uucp-rlogin",
+	542:   "commerce",
+	543:   "klogin",
+	544:   "kshell",
+	545:   "appleqtcsrvr",
+	546:   "dhcpv6-client",
+	547:   "dhcpv6-server",
+	548:   "afpovertcp",
+	549:   "idfp",
+	550:   "new-rwho",
+	551:   "cybercash",
+	552:   "devshr-nts",
+	553:   "pirp",
+	554:   "rtsp",
+	555:   "dsf",
+	556:   "remotefs",
+	557:   "openvms-sysipc",
+	558:   "sdnskmp",
+	559:   "teedtap",
+	560:   "rmonitor",
+	561:   "monitor",
+	562:   "chshell",
+	563:   "nntps",
+	564:   "9pfs",
+	565:   "whoami",
+	566:   "streettalk",
+	567:   "banyan-rpc",
+	568:   "ms-shuttle",
+	569:   "ms-rome",
+	570:   "meter",
+	571:   "meter",
+	572:   "sonar",
+	573:   "banyan-vip",
+	574:   "ftp-agent",
+	575:   "vemmi",
+	576:   "ipcd",
+	577:   "vnas",
+	578:   "ipdd",
+	579:   "decbsrv",
+	580:   "sntp-heartbeat",
+	581:   "bdp",
+	582:   "scc-security",
+	583:   "philips-vc",
+	584:   "keyserver",
+	586:   "password-chg",
+	587:   "submission",
+	588:   "cal",
+	589:   "eyelink",
+	590:   "tns-cml",
+	591:   "http-alt",
+	592:   "eudora-set",
+	593:   "http-rpc-epmap",
+	594:   "tpip",
+	595:   "cab-protocol",
+	596:   "smsd",
+	597:   "ptcnameservice",
+	598:   "sco-websrvrmg3",
+	599:   "acp",
+	600:   "ipcserver",
+	601:   "syslog-conn",
+	602:   "xmlrpc-beep",
+	603:   "idxp",
+	604:   "tunnel",
+	605:   "soap-beep",
+	606:   "urm",
+	607:   "nqs",
+	608:   "sift-uft",
+	609:   "npmp-trap",
+	610:   "npmp-local",
+	611:   "npmp-gui",
+	612:   "hmmp-ind",
+	613:   "hmmp-op",
+	614:   "sshell",
+	615:   "sco-inetmgr",
+	616:   "sco-sysmgr",
+	617:   "sco-dtmgr",
+	618:   "dei-icda",
+	619:   "compaq-evm",
+	620:   "sco-websrvrmgr",
+	621:   "escp-ip",
+	622:   "collaborator",
+	623:   "asf-rmcp",
+	624:   "cryptoadmin",
+	625:   "dec-dlm",
+	626:   "asia",
+	627:   "passgo-tivoli",
+	628:   "qmqp",
+	629:   "3com-amp3",
+	630:   "rda",
+	631:   "ipp",
+	632:   "bmpp",
+	633:   "servstat",
+	634:   "ginad",
+	635:   "rlzdbase",
+	636:   "ldaps",
+	637:   "lanserver",
+	638:   "mcns-sec",
+	639:   "msdp",
+	640:   "entrust-sps",
+	641:   "repcmd",
+	642:   "esro-emsdp",
+	643:   "sanity",
+	644:   "dwr",
+	645:   "pssc",
+	646:   "ldp",
+	647:   "dhcp-failover",
+	648:   "rrp",
+	649:   "cadview-3d",
+	650:   "obex",
+	651:   "ieee-mms",
+	652:   "hello-port",
+	653:   "repscmd",
+	654:   "aodv",
+	655:   "tinc",
+	656:   "spmp",
+	657:   "rmc",
+	658:   "tenfold",
+	660:   "mac-srvr-admin",
+	661:   "hap",
+	662:   "pftp",
+	663:   "purenoise",
+	664:   "asf-secure-rmcp",
+	665:   "sun-dr",
+	666:   "mdqs",
+	667:   "disclose",
+	668:   "mecomm",
+	669:   "meregister",
+	670:   "vacdsm-sws",
+	671:   "vacdsm-app",
+	672:   "vpps-qua",
+	673:   "cimplex",
+	674:   "acap",
+	675:   "dctp",
+	676:   "vpps-via",
+	677:   "vpp",
+	678:   "ggf-ncp",
+	679:   "mrm",
+	680:   "entrust-aaas",
+	681:   "entrust-aams",
+	682:   "xfr",
+	683:   "corba-iiop",
+	684:   "corba-iiop-ssl",
+	685:   "mdc-portmapper",
+	686:   "hcp-wismar",
+	687:   "asipregistry",
+	688:   "realm-rusd",
+	689:   "nmap",
+	690:   "vatp",
+	691:   "msexch-routing",
+	692:   "hyperwave-isp",
+	693:   "connendp",
+	694:   "ha-cluster",
+	695:   "ieee-mms-ssl",
+	696:   "rushd",
+	697:   "uuidgen",
+	698:   "olsr",
+	699:   "accessnetwork",
+	700:   "epp",
+	701:   "lmp",
+	702:   "iris-beep",
+	704:   "elcsd",
+	705:   "agentx",
+	706:   "silc",
+	707:   "borland-dsj",
+	709:   "entrust-kmsh",
+	710:   "entrust-ash",
+	711:   "cisco-tdp",
+	712:   "tbrpf",
+	713:   "iris-xpc",
+	714:   "iris-xpcs",
+	715:   "iris-lwz",
+	716:   "pana",
+	729:   "netviewdm1",
+	730:   "netviewdm2",
+	731:   "netviewdm3",
+	741:   "netgw",
+	742:   "netrcs",
+	744:   "flexlm",
+	747:   "fujitsu-dev",
+	748:   "ris-cm",
+	749:   "kerberos-adm",
+	750:   "loadav",
+	751:   "pump",
+	752:   "qrh",
+	753:   "rrh",
+	754:   "tell",
+	758:   "nlogin",
+	759:   "con",
+	760:   "ns",
+	761:   "rxe",
+	762:   "quotad",
+	763:   "cycleserv",
+	764:   "omserv",
+	765:   "webster",
+	767:   "phonebook",
+	769:   "vid",
+	770:   "cadlock",
+	771:   "rtip",
+	772:   "cycleserv2",
+	773:   "notify",
+	774:   "acmaint-dbd",
+	775:   "acmaint-transd",
+	776:   "wpages",
+	777:   "multiling-http",
+	780:   "wpgs",
+	800:   "mdbs-daemon",
+	801:   "device",
+	802:   "mbap-s",
+	810:   "fcp-udp",
+	828:   "itm-mcell-s",
+	829:   "pkix-3-ca-ra",
+	830:   "netconf-ssh",
+	831:   "netconf-beep",
+	832:   "netconfsoaphttp",
+	833:   "netconfsoapbeep",
+	847:   "dhcp-failover2",
+	848:   "gdoi",
+	853:   "domain-s",
+	854:   "dlep",
+	860:   "iscsi",
+	861:   "owamp-control",
+	862:   "twamp-control",
+	873:   "rsync",
+	886:   "iclcnet-locate",
+	887:   "iclcnet-svinfo",
+	888:   "accessbuilder",
+	900:   "omginitialrefs",
+	901:   "smpnameres",
+	902:   "ideafarm-door",
+	903:   "ideafarm-panic",
+	910:   "kink",
+	911:   "xact-backup",
+	912:   "apex-mesh",
+	913:   "apex-edge",
+	989:   "ftps-data",
+	990:   "ftps",
+	991:   "nas",
+	992:   "telnets",
+	993:   "imaps",
+	995:   "pop3s",
+	996:   "vsinet",
+	997:   "maitrd",
+	998:   "puparp",
+	999:   "applix",
+	1000:  "cadlock2",
+	1010:  "surf",
+	1021:  "exp1",
+	1022:  "exp2",
+	1025:  "blackjack",
+	1026:  "cap",
+	1027:  "6a44",
+	1029:  "solid-mux",
+	1033:  "netinfo-local",
+	1034:  "activesync",
+	1035:  "mxxrlogin",
+	1036:  "nsstp",
+	1037:  "ams",
+	1038:  "mtqp",
+	1039:  "sbl",
+	1040:  "netarx",
+	1041:  "danf-ak2",
+	1042:  "afrog",
+	1043:  "boinc-client",
+	1044:  "dcutility",
+	1045:  "fpitp",
+	1046:  "wfremotertm",
+	1047:  "neod1",
+	1048:  "neod2",
+	1049:  "td-postman",
+	1050:  "cma",
+	1051:  "optima-vnet",
+	1052:  "ddt",
+	1053:  "remote-as",
+	1054:  "brvread",
+	1055:  "ansyslmd",
+	1056:  "vfo",
+	1057:  "startron",
+	1058:  "nim",
+	1059:  "nimreg",
+	1060:  "polestar",
+	1061:  "kiosk",
+	1062:  "veracity",
+	1063:  "kyoceranetdev",
+	1064:  "jstel",
+	1065:  "syscomlan",
+	1066:  "fpo-fns",
+	1067:  "instl-boots",
+	1068:  "instl-bootc",
+	1069:  "cognex-insight",
+	1070:  "gmrupdateserv",
+	1071:  "bsquare-voip",
+	1072:  "cardax",
+	1073:  "bridgecontrol",
+	1074:  "warmspotMgmt",
+	1075:  "rdrmshc",
+	1076:  "dab-sti-c",
+	1077:  "imgames",
+	1078:  "avocent-proxy",
+	1079:  "asprovatalk",
+	1080:  "socks",
+	1081:  "pvuniwien",
+	1082:  "amt-esd-prot",
+	1083:  "ansoft-lm-1",
+	1084:  "ansoft-lm-2",
+	1085:  "webobjects",
+	1086:  "cplscrambler-lg",
+	1087:  "cplscrambler-in",
+	1088:  "cplscrambler-al",
+	1089:  "ff-annunc",
+	1090:  "ff-fms",
+	1091:  "ff-sm",
+	1092:  "obrpd",
+	1093:  "proofd",
+	1094:  "rootd",
+	1095:  "nicelink",
+	1096:  "cnrprotocol",
+	1097:  "sunclustermgr",
+	1098:  "rmiactivation",
+	1099:  "rmiregistry",
+	1100:  "mctp",
+	1101:  "pt2-discover",
+	1102:  "adobeserver-1",
+	1103:  "adobeserver-2",
+	1104:  "xrl",
+	1105:  "ftranhc",
+	1106:  "isoipsigport-1",
+	1107:  "isoipsigport-2",
+	1108:  "ratio-adp",
+	1110:  "nfsd-keepalive",
+	1111:  "lmsocialserver",
+	1112:  "icp",
+	1113:  "ltp-deepspace",
+	1114:  "mini-sql",
+	1115:  "ardus-trns",
+	1116:  "ardus-cntl",
+	1117:  "ardus-mtrns",
+	1118:  "sacred",
+	1119:  "bnetgame",
+	1120:  "bnetfile",
+	1121:  "rmpp",
+	1122:  "availant-mgr",
+	1123:  "murray",
+	1124:  "hpvmmcontrol",
+	1125:  "hpvmmagent",
+	1126:  "hpvmmdata",
+	1127:  "kwdb-commn",
+	1128:  "saphostctrl",
+	1129:  "saphostctrls",
+	1130:  "casp",
+	1131:  "caspssl",
+	1132:  "kvm-via-ip",
+	1133:  "dfn",
+	1134:  "aplx",
+	1135:  "omnivision",
+	1136:  "hhb-gateway",
+	1137:  "trim",
+	1138:  "encrypted-admin",
+	1139:  "evm",
+	1140:  "autonoc",
+	1141:  "mxomss",
+	1142:  "edtools",
+	1143:  "imyx",
+	1144:  "fuscript",
+	1145:  "x9-icue",
+	1146:  "audit-transfer",
+	1147:  "capioverlan",
+	1148:  "elfiq-repl",
+	1149:  "bvtsonar",
+	1150:  "blaze",
+	1151:  "unizensus",
+	1152:  "winpoplanmess",
+	1153:  "c1222-acse",
+	1154:  "resacommunity",
+	1155:  "nfa",
+	1156:  "iascontrol-oms",
+	1157:  "iascontrol",
+	1158:  "dbcontrol-oms",
+	1159:  "oracle-oms",
+	1160:  "olsv",
+	1161:  "health-polling",
+	1162:  "health-trap",
+	1163:  "sddp",
+	1164:  "qsm-proxy",
+	1165:  "qsm-gui",
+	1166:  "qsm-remote",
+	1167:  "cisco-ipsla",
+	1168:  "vchat",
+	1169:  "tripwire",
+	1170:  "atc-lm",
+	1171:  "atc-appserver",
+	1172:  "dnap",
+	1173:  "d-cinema-rrp",
+	1174:  "fnet-remote-ui",
+	1175:  "dossier",
+	1176:  "indigo-server",
+	1177:  "dkmessenger",
+	1178:  "sgi-storman",
+	1179:  "b2n",
+	1180:  "mc-client",
+	1181:  "3comnetman",
+	1182:  "accelenet-data",
+	1183:  "llsurfup-http",
+	1184:  "llsurfup-https",
+	1185:  "catchpole",
+	1186:  "mysql-cluster",
+	1187:  "alias",
+	1188:  "hp-webadmin",
+	1189:  "unet",
+	1190:  "commlinx-avl",
+	1191:  "gpfs",
+	1192:  "caids-sensor",
+	1193:  "fiveacross",
+	1194:  "openvpn",
+	1195:  "rsf-1",
+	1196:  "netmagic",
+	1197:  "carrius-rshell",
+	1198:  "cajo-discovery",
+	1199:  "dmidi",
+	1200:  "scol",
+	1201:  "nucleus-sand",
+	1202:  "caiccipc",
+	1203:  "ssslic-mgr",
+	1204:  "ssslog-mgr",
+	1205:  "accord-mgc",
+	1206:  "anthony-data",
+	1207:  "metasage",
+	1208:  "seagull-ais",
+	1209:  "ipcd3",
+	1210:  "eoss",
+	1211:  "groove-dpp",
+	1212:  "lupa",
+	1213:  "mpc-lifenet",
+	1214:  "kazaa",
+	1215:  "scanstat-1",
+	1216:  "etebac5",
+	1217:  "hpss-ndapi",
+	1218:  "aeroflight-ads",
+	1219:  "aeroflight-ret",
+	1220:  "qt-serveradmin",
+	1221:  "sweetware-apps",
+	1222:  "nerv",
+	1223:  "tgp",
+	1224:  "vpnz",
+	1225:  "slinkysearch",
+	1226:  "stgxfws",
+	1227:  "dns2go",
+	1228:  "florence",
+	1229:  "zented",
+	1230:  "periscope",
+	1231:  "menandmice-lpm",
+	1232:  "first-defense",
+	1233:  "univ-appserver",
+	1234:  "search-agent",
+	1235:  "mosaicsyssvc1",
+	1236:  "bvcontrol",
+	1237:  "tsdos390",
+	1238:  "hacl-qs",
+	1239:  "nmsd",
+	1240:  "instantia",
+	1241:  "nessus",
+	1242:  "nmasoverip",
+	1243:  "serialgateway",
+	1244:  "isbconference1",
+	1245:  "isbconference2",
+	1246:  "payrouter",
+	1247:  "visionpyramid",
+	1248:  "hermes",
+	1249:  "mesavistaco",
+	1250:  "swldy-sias",
+	1251:  "servergraph",
+	1252:  "bspne-pcc",
+	1253:  "q55-pcc",
+	1254:  "de-noc",
+	1255:  "de-cache-query",
+	1256:  "de-server",
+	1257:  "shockwave2",
+	1258:  "opennl",
+	1259:  "opennl-voice",
+	1260:  "ibm-ssd",
+	1261:  "mpshrsv",
+	1262:  "qnts-orb",
+	1263:  "dka",
+	1264:  "prat",
+	1265:  "dssiapi",
+	1266:  "dellpwrappks",
+	1267:  "epc",
+	1268:  "propel-msgsys",
+	1269:  "watilapp",
+	1270:  "opsmgr",
+	1271:  "excw",
+	1272:  "cspmlockmgr",
+	1273:  "emc-gateway",
+	1274:  "t1distproc",
+	1275:  "ivcollector",
+	1277:  "miva-mqs",
+	1278:  "dellwebadmin-1",
+	1279:  "dellwebadmin-2",
+	1280:  "pictrography",
+	1281:  "healthd",
+	1282:  "emperion",
+	1283:  "productinfo",
+	1284:  "iee-qfx",
+	1285:  "neoiface",
+	1286:  "netuitive",
+	1287:  "routematch",
+	1288:  "navbuddy",
+	1289:  "jwalkserver",
+	1290:  "winjaserver",
+	1291:  "seagulllms",
+	1292:  "dsdn",
+	1293:  "pkt-krb-ipsec",
+	1294:  "cmmdriver",
+	1295:  "ehtp",
+	1296:  "dproxy",
+	1297:  "sdproxy",
+	1298:  "lpcp",
+	1299:  "hp-sci",
+	1300:  "h323hostcallsc",
+	1301:  "ci3-software-1",
+	1302:  "ci3-software-2",
+	1303:  "sftsrv",
+	1304:  "boomerang",
+	1305:  "pe-mike",
+	1306:  "re-conn-proto",
+	1307:  "pacmand",
+	1308:  "odsi",
+	1309:  "jtag-server",
+	1310:  "husky",
+	1311:  "rxmon",
+	1312:  "sti-envision",
+	1313:  "bmc-patroldb",
+	1314:  "pdps",
+	1315:  "els",
+	1316:  "exbit-escp",
+	1317:  "vrts-ipcserver",
+	1318:  "krb5gatekeeper",
+	1319:  "amx-icsp",
+	1320:  "amx-axbnet",
+	1321:  "pip",
+	1322:  "novation",
+	1323:  "brcd",
+	1324:  "delta-mcp",
+	1325:  "dx-instrument",
+	1326:  "wimsic",
+	1327:  "ultrex",
+	1328:  "ewall",
+	1329:  "netdb-export",
+	1330:  "streetperfect",
+	1331:  "intersan",
+	1332:  "pcia-rxp-b",
+	1333:  "passwrd-policy",
+	1334:  "writesrv",
+	1335:  "digital-notary",
+	1336:  "ischat",
+	1337:  "menandmice-dns",
+	1338:  "wmc-log-svc",
+	1339:  "kjtsiteserver",
+	1340:  "naap",
+	1341:  "qubes",
+	1342:  "esbroker",
+	1343:  "re101",
+	1344:  "icap",
+	1345:  "vpjp",
+	1346:  "alta-ana-lm",
+	1347:  "bbn-mmc",
+	1348:  "bbn-mmx",
+	1349:  "sbook",
+	1350:  "editbench",
+	1351:  "equationbuilder",
+	1352:  "lotusnote",
+	1353:  "relief",
+	1354:  "XSIP-network",
+	1355:  "intuitive-edge",
+	1356:  "cuillamartin",
+	1357:  "pegboard",
+	1358:  "connlcli",
+	1359:  "ftsrv",
+	1360:  "mimer",
+	1361:  "linx",
+	1362:  "timeflies",
+	1363:  "ndm-requester",
+	1364:  "ndm-server",
+	1365:  "adapt-sna",
+	1366:  "netware-csp",
+	1367:  "dcs",
+	1368:  "screencast",
+	1369:  "gv-us",
+	1370:  "us-gv",
+	1371:  "fc-cli",
+	1372:  "fc-ser",
+	1373:  "chromagrafx",
+	1374:  "molly",
+	1375:  "bytex",
+	1376:  "ibm-pps",
+	1377:  "cichlid",
+	1378:  "elan",
+	1379:  "dbreporter",
+	1380:  "telesis-licman",
+	1381:  "apple-licman",
+	1382:  "udt-os",
+	1383:  "gwha",
+	1384:  "os-licman",
+	1385:  "atex-elmd",
+	1386:  "checksum",
+	1387:  "cadsi-lm",
+	1388:  "objective-dbc",
+	1389:  "iclpv-dm",
+	1390:  "iclpv-sc",
+	1391:  "iclpv-sas",
+	1392:  "iclpv-pm",
+	1393:  "iclpv-nls",
+	1394:  "iclpv-nlc",
+	1395:  "iclpv-wsm",
+	1396:  "dvl-activemail",
+	1397:  "audio-activmail",
+	1398:  "video-activmail",
+	1399:  "cadkey-licman",
+	1400:  "cadkey-tablet",
+	1401:  "goldleaf-licman",
+	1402:  "prm-sm-np",
+	1403:  "prm-nm-np",
+	1404:  "igi-lm",
+	1405:  "ibm-res",
+	1406:  "netlabs-lm",
+	1408:  "sophia-lm",
+	1409:  "here-lm",
+	1410:  "hiq",
+	1411:  "af",
+	1412:  "innosys",
+	1413:  "innosys-acl",
+	1414:  "ibm-mqseries",
+	1415:  "dbstar",
+	1416:  "novell-lu6-2",
+	1417:  "timbuktu-srv1",
+	1418:  "timbuktu-srv2",
+	1419:  "timbuktu-srv3",
+	1420:  "timbuktu-srv4",
+	1421:  "gandalf-lm",
+	1422:  "autodesk-lm",
+	1423:  "essbase",
+	1424:  "hybrid",
+	1425:  "zion-lm",
+	1426:  "sais",
+	1427:  "mloadd",
+	1428:  "informatik-lm",
+	1429:  "nms",
+	1430:  "tpdu",
+	1431:  "rgtp",
+	1432:  "blueberry-lm",
+	1433:  "ms-sql-s",
+	1434:  "ms-sql-m",
+	1435:  "ibm-cics",
+	1436:  "saism",
+	1437:  "tabula",
+	1438:  "eicon-server",
+	1439:  "eicon-x25",
+	1440:  "eicon-slp",
+	1441:  "cadis-1",
+	1442:  "cadis-2",
+	1443:  "ies-lm",
+	1444:  "marcam-lm",
+	1445:  "proxima-lm",
+	1446:  "ora-lm",
+	1447:  "apri-lm",
+	1448:  "oc-lm",
+	1449:  "peport",
+	1450:  "dwf",
+	1451:  "infoman",
+	1452:  "gtegsc-lm",
+	1453:  "genie-lm",
+	1454:  "interhdl-elmd",
+	1455:  "esl-lm",
+	1456:  "dca",
+	1457:  "valisys-lm",
+	1458:  "nrcabq-lm",
+	1459:  "proshare1",
+	1460:  "proshare2",
+	1461:  "ibm-wrless-lan",
+	1462:  "world-lm",
+	1463:  "nucleus",
+	1464:  "msl-lmd",
+	1465:  "pipes",
+	1466:  "oceansoft-lm",
+	1467:  "csdmbase",
+	1468:  "csdm",
+	1469:  "aal-lm",
+	1470:  "uaiact",
+	1471:  "csdmbase",
+	1472:  "csdm",
+	1473:  "openmath",
+	1474:  "telefinder",
+	1475:  "taligent-lm",
+	1476:  "clvm-cfg",
+	1477:  "ms-sna-server",
+	1478:  "ms-sna-base",
+	1479:  "dberegister",
+	1480:  "pacerforum",
+	1481:  "airs",
+	1482:  "miteksys-lm",
+	1483:  "afs",
+	1484:  "confluent",
+	1485:  "lansource",
+	1486:  "nms-topo-serv",
+	1487:  "localinfosrvr",
+	1488:  "docstor",
+	1489:  "dmdocbroker",
+	1490:  "insitu-conf",
+	1492:  "stone-design-1",
+	1493:  "netmap-lm",
+	1494:  "ica",
+	1495:  "cvc",
+	1496:  "liberty-lm",
+	1497:  "rfx-lm",
+	1498:  "sybase-sqlany",
+	1499:  "fhc",
+	1500:  "vlsi-lm",
+	1501:  "saiscm",
+	1502:  "shivadiscovery",
+	1503:  "imtc-mcs",
+	1504:  "evb-elm",
+	1505:  "funkproxy",
+	1506:  "utcd",
+	1507:  "symplex",
+	1508:  "diagmond",
+	1509:  "robcad-lm",
+	1510:  "mvx-lm",
+	1511:  "3l-l1",
+	1512:  "wins",
+	1513:  "fujitsu-dtc",
+	1514:  "fujitsu-dtcns",
+	1515:  "ifor-protocol",
+	1516:  "vpad",
+	1517:  "vpac",
+	1518:  "vpvd",
+	1519:  "vpvc",
+	1520:  "atm-zip-office",
+	1521:  "ncube-lm",
+	1522:  "ricardo-lm",
+	1523:  "cichild-lm",
+	1524:  "ingreslock",
+	1525:  "orasrv",
+	1526:  "pdap-np",
+	1527:  "tlisrv",
+	1528:  "ngr-t",
+	1529:  "coauthor",
+	1530:  "rap-service",
+	1531:  "rap-listen",
+	1532:  "miroconnect",
+	1533:  "virtual-places",
+	1534:  "micromuse-lm",
+	1535:  "ampr-info",
+	1536:  "ampr-inter",
+	1537:  "sdsc-lm",
+	1538:  "3ds-lm",
+	1539:  "intellistor-lm",
+	1540:  "rds",
+	1541:  "rds2",
+	1542:  "gridgen-elmd",
+	1543:  "simba-cs",
+	1544:  "aspeclmd",
+	1545:  "vistium-share",
+	1546:  "abbaccuray",
+	1547:  "laplink",
+	1548:  "axon-lm",
+	1549:  "shivasound",
+	1550:  "3m-image-lm",
+	1551:  "hecmtl-db",
+	1552:  "pciarray",
+	1553:  "sna-cs",
+	1554:  "caci-lm",
+	1555:  "livelan",
+	1556:  "veritas-pbx",
+	1557:  "arbortext-lm",
+	1558:  "xingmpeg",
+	1559:  "web2host",
+	1560:  "asci-val",
+	1561:  "facilityview",
+	1562:  "pconnectmgr",
+	1563:  "cadabra-lm",
+	1564:  "pay-per-view",
+	1565:  "winddlb",
+	1566:  "corelvideo",
+	1567:  "jlicelmd",
+	1568:  "tsspmap",
+	1569:  "ets",
+	1570:  "orbixd",
+	1571:  "rdb-dbs-disp",
+	1572:  "chip-lm",
+	1573:  "itscomm-ns",
+	1574:  "mvel-lm",
+	1575:  "oraclenames",
+	1576:  "moldflow-lm",
+	1577:  "hypercube-lm",
+	1578:  "jacobus-lm",
+	1579:  "ioc-sea-lm",
+	1580:  "tn-tl-r2",
+	1581:  "mil-2045-47001",
+	1582:  "msims",
+	1583:  "simbaexpress",
+	1584:  "tn-tl-fd2",
+	1585:  "intv",
+	1586:  "ibm-abtact",
+	1587:  "pra-elmd",
+	1588:  "triquest-lm",
+	1589:  "vqp",
+	1590:  "gemini-lm",
+	1591:  "ncpm-pm",
+	1592:  "commonspace",
+	1593:  "mainsoft-lm",
+	1594:  "sixtrak",
+	1595:  "radio",
+	1596:  "radio-bc",
+	1597:  "orbplus-iiop",
+	1598:  "picknfs",
+	1599:  "simbaservices",
+	1600:  "issd",
+	1601:  "aas",
+	1602:  "inspect",
+	1603:  "picodbc",
+	1604:  "icabrowser",
+	1605:  "slp",
+	1606:  "slm-api",
+	1607:  "stt",
+	1608:  "smart-lm",
+	1609:  "isysg-lm",
+	1610:  "taurus-wh",
+	1611:  "ill",
+	1612:  "netbill-trans",
+	1613:  "netbill-keyrep",
+	1614:  "netbill-cred",
+	1615:  "netbill-auth",
+	1616:  "netbill-prod",
+	1617:  "nimrod-agent",
+	1618:  "skytelnet",
+	1619:  "xs-openstorage",
+	1620:  "faxportwinport",
+	1621:  "softdataphone",
+	1622:  "ontime",
+	1623:  "jaleosnd",
+	1624:  "udp-sr-port",
+	1625:  "svs-omagent",
+	1626:  "shockwave",
+	1627:  "t128-gateway",
+	1628:  "lontalk-norm",
+	1629:  "lontalk-urgnt",
+	1630:  "oraclenet8cman",
+	1631:  "visitview",
+	1632:  "pammratc",
+	1633:  "pammrpc",
+	1634:  "loaprobe",
+	1635:  "edb-server1",
+	1636:  "isdc",
+	1637:  "islc",
+	1638:  "ismc",
+	1639:  "cert-initiator",
+	1640:  "cert-responder",
+	1641:  "invision",
+	1642:  "isis-am",
+	1643:  "isis-ambc",
+	1644:  "saiseh",
+	1645:  "sightline",
+	1646:  "sa-msg-port",
+	1647:  "rsap",
+	1648:  "concurrent-lm",
+	1649:  "kermit",
+	1650:  "nkd",
+	1651:  "shiva-confsrvr",
+	1652:  "xnmp",
+	1653:  "alphatech-lm",
+	1654:  "stargatealerts",
+	1655:  "dec-mbadmin",
+	1656:  "dec-mbadmin-h",
+	1657:  "fujitsu-mmpdc",
+	1658:  "sixnetudr",
+	1659:  "sg-lm",
+	1660:  "skip-mc-gikreq",
+	1661:  "netview-aix-1",
+	1662:  "netview-aix-2",
+	1663:  "netview-aix-3",
+	1664:  "netview-aix-4",
+	1665:  "netview-aix-5",
+	1666:  "netview-aix-6",
+	1667:  "netview-aix-7",
+	1668:  "netview-aix-8",
+	1669:  "netview-aix-9",
+	1670:  "netview-aix-10",
+	1671:  "netview-aix-11",
+	1672:  "netview-aix-12",
+	1673:  "proshare-mc-1",
+	1674:  "proshare-mc-2",
+	1675:  "pdp",
+	1676:  "netcomm2",
+	1677:  "groupwise",
+	1678:  "prolink",
+	1679:  "darcorp-lm",
+	1680:  "microcom-sbp",
+	1681:  "sd-elmd",
+	1682:  "lanyon-lantern",
+	1683:  "ncpm-hip",
+	1684:  "snaresecure",
+	1685:  "n2nremote",
+	1686:  "cvmon",
+	1687:  "nsjtp-ctrl",
+	1688:  "nsjtp-data",
+	1689:  "firefox",
+	1690:  "ng-umds",
+	1691:  "empire-empuma",
+	1692:  "sstsys-lm",
+	1693:  "rrirtr",
+	1694:  "rrimwm",
+	1695:  "rrilwm",
+	1696:  "rrifmm",
+	1697:  "rrisat",
+	1698:  "rsvp-encap-1",
+	1699:  "rsvp-encap-2",
+	1700:  "mps-raft",
+	1701:  "l2f",
+	1702:  "deskshare",
+	1703:  "hb-engine",
+	1704:  "bcs-broker",
+	1705:  "slingshot",
+	1706:  "jetform",
+	1707:  "vdmplay",
+	1708:  "gat-lmd",
+	1709:  "centra",
+	1710:  "impera",
+	1711:  "pptconference",
+	1712:  "registrar",
+	1713:  "conferencetalk",
+	1714:  "sesi-lm",
+	1715:  "houdini-lm",
+	1716:  "xmsg",
+	1717:  "fj-hdnet",
+	1718:  "h323gatedisc",
+	1719:  "h323gatestat",
+	1720:  "h323hostcall",
+	1721:  "caicci",
+	1722:  "hks-lm",
+	1723:  "pptp",
+	1724:  "csbphonemaster",
+	1725:  "iden-ralp",
+	1726:  "iberiagames",
+	1727:  "winddx",
+	1728:  "telindus",
+	1729:  "citynl",
+	1730:  "roketz",
+	1731:  "msiccp",
+	1732:  "proxim",
+	1733:  "siipat",
+	1734:  "cambertx-lm",
+	1735:  "privatechat",
+	1736:  "street-stream",
+	1737:  "ultimad",
+	1738:  "gamegen1",
+	1739:  "webaccess",
+	1740:  "encore",
+	1741:  "cisco-net-mgmt",
+	1742:  "3Com-nsd",
+	1743:  "cinegrfx-lm",
+	1744:  "ncpm-ft",
+	1745:  "remote-winsock",
+	1746:  "ftrapid-1",
+	1747:  "ftrapid-2",
+	1748:  "oracle-em1",
+	1749:  "aspen-services",
+	1750:  "sslp",
+	1751:  "swiftnet",
+	1752:  "lofr-lm",
+	1754:  "oracle-em2",
+	1755:  "ms-streaming",
+	1756:  "capfast-lmd",
+	1757:  "cnhrp",
+	1758:  "tftp-mcast",
+	1759:  "spss-lm",
+	1760:  "www-ldap-gw",
+	1761:  "cft-0",
+	1762:  "cft-1",
+	1763:  "cft-2",
+	1764:  "cft-3",
+	1765:  "cft-4",
+	1766:  "cft-5",
+	1767:  "cft-6",
+	1768:  "cft-7",
+	1769:  "bmc-net-adm",
+	1770:  "bmc-net-svc",
+	1771:  "vaultbase",
+	1772:  "essweb-gw",
+	1773:  "kmscontrol",
+	1774:  "global-dtserv",
+	1776:  "femis",
+	1777:  "powerguardian",
+	1778:  "prodigy-intrnet",
+	1779:  "pharmasoft",
+	1780:  "dpkeyserv",
+	1781:  "answersoft-lm",
+	1782:  "hp-hcip",
+	1784:  "finle-lm",
+	1785:  "windlm",
+	1786:  "funk-logger",
+	1787:  "funk-license",
+	1788:  "psmond",
+	1789:  "hello",
+	1790:  "nmsp",
+	1791:  "ea1",
+	1792:  "ibm-dt-2",
+	1793:  "rsc-robot",
+	1794:  "cera-bcm",
+	1795:  "dpi-proxy",
+	1796:  "vocaltec-admin",
+	1797:  "uma",
+	1798:  "etp",
+	1799:  "netrisk",
+	1800:  "ansys-lm",
+	1801:  "msmq",
+	1802:  "concomp1",
+	1803:  "hp-hcip-gwy",
+	1804:  "enl",
+	1805:  "enl-name",
+	1806:  "musiconline",
+	1807:  "fhsp",
+	1808:  "oracle-vp2",
+	1809:  "oracle-vp1",
+	1810:  "jerand-lm",
+	1811:  "scientia-sdb",
+	1812:  "radius",
+	1813:  "radius-acct",
+	1814:  "tdp-suite",
+	1815:  "mmpft",
+	1816:  "harp",
+	1817:  "rkb-oscs",
+	1818:  "etftp",
+	1819:  "plato-lm",
+	1820:  "mcagent",
+	1821:  "donnyworld",
+	1822:  "es-elmd",
+	1823:  "unisys-lm",
+	1824:  "metrics-pas",
+	1825:  "direcpc-video",
+	1826:  "ardt",
+	1827:  "asi",
+	1828:  "itm-mcell-u",
+	1829:  "optika-emedia",
+	1830:  "net8-cman",
+	1831:  "myrtle",
+	1832:  "tht-treasure",
+	1833:  "udpradio",
+	1834:  "ardusuni",
+	1835:  "ardusmul",
+	1836:  "ste-smsc",
+	1837:  "csoft1",
+	1838:  "talnet",
+	1839:  "netopia-vo1",
+	1840:  "netopia-vo2",
+	1841:  "netopia-vo3",
+	1842:  "netopia-vo4",
+	1843:  "netopia-vo5",
+	1844:  "direcpc-dll",
+	1845:  "altalink",
+	1846:  "tunstall-pnc",
+	1847:  "slp-notify",
+	1848:  "fjdocdist",
+	1849:  "alpha-sms",
+	1850:  "gsi",
+	1851:  "ctcd",
+	1852:  "virtual-time",
+	1853:  "vids-avtp",
+	1854:  "buddy-draw",
+	1855:  "fiorano-rtrsvc",
+	1856:  "fiorano-msgsvc",
+	1857:  "datacaptor",
+	1858:  "privateark",
+	1859:  "gammafetchsvr",
+	1860:  "sunscalar-svc",
+	1861:  "lecroy-vicp",
+	1862:  "mysql-cm-agent",
+	1863:  "msnp",
+	1864:  "paradym-31port",
+	1865:  "entp",
+	1866:  "swrmi",
+	1867:  "udrive",
+	1868:  "viziblebrowser",
+	1869:  "transact",
+	1870:  "sunscalar-dns",
+	1871:  "canocentral0",
+	1872:  "canocentral1",
+	1873:  "fjmpjps",
+	1874:  "fjswapsnp",
+	1875:  "westell-stats",
+	1876:  "ewcappsrv",
+	1877:  "hp-webqosdb",
+	1878:  "drmsmc",
+	1879:  "nettgain-nms",
+	1880:  "vsat-control",
+	1881:  "ibm-mqseries2",
+	1882:  "ecsqdmn",
+	1883:  "mqtt",
+	1884:  "idmaps",
+	1885:  "vrtstrapserver",
+	1886:  "leoip",
+	1887:  "filex-lport",
+	1888:  "ncconfig",
+	1889:  "unify-adapter",
+	1890:  "wilkenlistener",
+	1891:  "childkey-notif",
+	1892:  "childkey-ctrl",
+	1893:  "elad",
+	1894:  "o2server-port",
+	1896:  "b-novative-ls",
+	1897:  "metaagent",
+	1898:  "cymtec-port",
+	1899:  "mc2studios",
+	1900:  "ssdp",
+	1901:  "fjicl-tep-a",
+	1902:  "fjicl-tep-b",
+	1903:  "linkname",
+	1904:  "fjicl-tep-c",
+	1905:  "sugp",
+	1906:  "tpmd",
+	1907:  "intrastar",
+	1908:  "dawn",
+	1909:  "global-wlink",
+	1910:  "ultrabac",
+	1911:  "mtp",
+	1912:  "rhp-iibp",
+	1913:  "armadp",
+	1914:  "elm-momentum",
+	1915:  "facelink",
+	1916:  "persona",
+	1917:  "noagent",
+	1918:  "can-nds",
+	1919:  "can-dch",
+	1920:  "can-ferret",
+	1921:  "noadmin",
+	1922:  "tapestry",
+	1923:  "spice",
+	1924:  "xiip",
+	1925:  "discovery-port",
+	1926:  "egs",
+	1927:  "videte-cipc",
+	1928:  "emsd-port",
+	1929:  "bandwiz-system",
+	1930:  "driveappserver",
+	1931:  "amdsched",
+	1932:  "ctt-broker",
+	1933:  "xmapi",
+	1934:  "xaapi",
+	1935:  "macromedia-fcs",
+	1936:  "jetcmeserver",
+	1937:  "jwserver",
+	1938:  "jwclient",
+	1939:  "jvserver",
+	1940:  "jvclient",
+	1941:  "dic-aida",
+	1942:  "res",
+	1943:  "beeyond-media",
+	1944:  "close-combat",
+	1945:  "dialogic-elmd",
+	1946:  "tekpls",
+	1947:  "sentinelsrm",
+	1948:  "eye2eye",
+	1949:  "ismaeasdaqlive",
+	1950:  "ismaeasdaqtest",
+	1951:  "bcs-lmserver",
+	1952:  "mpnjsc",
+	1953:  "rapidbase",
+	1954:  "abr-api",
+	1955:  "abr-secure",
+	1956:  "vrtl-vmf-ds",
+	1957:  "unix-status",
+	1958:  "dxadmind",
+	1959:  "simp-all",
+	1960:  "nasmanager",
+	1961:  "bts-appserver",
+	1962:  "biap-mp",
+	1963:  "webmachine",
+	1964:  "solid-e-engine",
+	1965:  "tivoli-npm",
+	1966:  "slush",
+	1967:  "sns-quote",
+	1968:  "lipsinc",
+	1969:  "lipsinc1",
+	1970:  "netop-rc",
+	1971:  "netop-school",
+	1972:  "intersys-cache",
+	1973:  "dlsrap",
+	1974:  "drp",
+	1975:  "tcoflashagent",
+	1976:  "tcoregagent",
+	1977:  "tcoaddressbook",
+	1978:  "unisql",
+	1979:  "unisql-java",
+	1980:  "pearldoc-xact",
+	1981:  "p2pq",
+	1982:  "estamp",
+	1983:  "lhtp",
+	1984:  "bb",
+	1985:  "hsrp",
+	1986:  "licensedaemon",
+	1987:  "tr-rsrb-p1",
+	1988:  "tr-rsrb-p2",
+	1989:  "tr-rsrb-p3",
+	1990:  "stun-p1",
+	1991:  "stun-p2",
+	1992:  "stun-p3",
+	1993:  "snmp-tcp-port",
+	1994:  "stun-port",
+	1995:  "perf-port",
+	1996:  "tr-rsrb-port",
+	1997:  "gdp-port",
+	1998:  "x25-svc-port",
+	1999:  "tcp-id-port",
+	2000:  "cisco-sccp",
+	2001:  "wizard",
+	2002:  "globe",
+	2003:  "brutus",
+	2004:  "emce",
+	2005:  "oracle",
+	2006:  "raid-cd",
+	2007:  "raid-am",
+	2008:  "terminaldb",
+	2009:  "whosockami",
+	2010:  "pipe-server",
+	2011:  "servserv",
+	2012:  "raid-ac",
+	2013:  "raid-cd",
+	2014:  "raid-sf",
+	2015:  "raid-cs",
+	2016:  "bootserver",
+	2017:  "bootclient",
+	2018:  "rellpack",
+	2019:  "about",
+	2020:  "xinupageserver",
+	2021:  "xinuexpansion1",
+	2022:  "xinuexpansion2",
+	2023:  "xinuexpansion3",
+	2024:  "xinuexpansion4",
+	2025:  "xribs",
+	2026:  "scrabble",
+	2027:  "shadowserver",
+	2028:  "submitserver",
+	2029:  "hsrpv6",
+	2030:  "device2",
+	2031:  "mobrien-chat",
+	2032:  "blackboard",
+	2033:  "glogger",
+	2034:  "scoremgr",
+	2035:  "imsldoc",
+	2036:  "e-dpnet",
+	2037:  "applus",
+	2038:  "objectmanager",
+	2039:  "prizma",
+	2040:  "lam",
+	2041:  "interbase",
+	2042:  "isis",
+	2043:  "isis-bcast",
+	2044:  "rimsl",
+	2045:  "cdfunc",
+	2046:  "sdfunc",
+	2047:  "dls",
+	2048:  "dls-monitor",
+	2049:  "shilp",
+	2050:  "av-emb-config",
+	2051:  "epnsdp",
+	2052:  "clearvisn",
+	2053:  "lot105-ds-upd",
+	2054:  "weblogin",
+	2055:  "iop",
+	2056:  "omnisky",
+	2057:  "rich-cp",
+	2058:  "newwavesearch",
+	2059:  "bmc-messaging",
+	2060:  "teleniumdaemon",
+	2061:  "netmount",
+	2062:  "icg-swp",
+	2063:  "icg-bridge",
+	2064:  "icg-iprelay",
+	2065:  "dlsrpn",
+	2066:  "aura",
+	2067:  "dlswpn",
+	2068:  "avauthsrvprtcl",
+	2069:  "event-port",
+	2070:  "ah-esp-encap",
+	2071:  "acp-port",
+	2072:  "msync",
+	2073:  "gxs-data-port",
+	2074:  "vrtl-vmf-sa",
+	2075:  "newlixengine",
+	2076:  "newlixconfig",
+	2077:  "tsrmagt",
+	2078:  "tpcsrvr",
+	2079:  "idware-router",
+	2080:  "autodesk-nlm",
+	2081:  "kme-trap-port",
+	2082:  "infowave",
+	2083:  "radsec",
+	2084:  "sunclustergeo",
+	2085:  "ada-cip",
+	2086:  "gnunet",
+	2087:  "eli",
+	2088:  "ip-blf",
+	2089:  "sep",
+	2090:  "lrp",
+	2091:  "prp",
+	2092:  "descent3",
+	2093:  "nbx-cc",
+	2094:  "nbx-au",
+	2095:  "nbx-ser",
+	2096:  "nbx-dir",
+	2097:  "jetformpreview",
+	2098:  "dialog-port",
+	2099:  "h2250-annex-g",
+	2100:  "amiganetfs",
+	2101:  "rtcm-sc104",
+	2102:  "zephyr-srv",
+	2103:  "zephyr-clt",
+	2104:  "zephyr-hm",
+	2105:  "minipay",
+	2106:  "mzap",
+	2107:  "bintec-admin",
+	2108:  "comcam",
+	2109:  "ergolight",
+	2110:  "umsp",
+	2111:  "dsatp",
+	2112:  "idonix-metanet",
+	2113:  "hsl-storm",
+	2114:  "newheights",
+	2115:  "kdm",
+	2116:  "ccowcmr",
+	2117:  "mentaclient",
+	2118:  "mentaserver",
+	2119:  "gsigatekeeper",
+	2120:  "qencp",
+	2121:  "scientia-ssdb",
+	2122:  "caupc-remote",
+	2123:  "gtp-control",
+	2124:  "elatelink",
+	2125:  "lockstep",
+	2126:  "pktcable-cops",
+	2127:  "index-pc-wb",
+	2128:  "net-steward",
+	2129:  "cs-live",
+	2130:  "xds",
+	2131:  "avantageb2b",
+	2132:  "solera-epmap",
+	2133:  "zymed-zpp",
+	2134:  "avenue",
+	2135:  "gris",
+	2136:  "appworxsrv",
+	2137:  "connect",
+	2138:  "unbind-cluster",
+	2139:  "ias-auth",
+	2140:  "ias-reg",
+	2141:  "ias-admind",
+	2142:  "tdmoip",
+	2143:  "lv-jc",
+	2144:  "lv-ffx",
+	2145:  "lv-pici",
+	2146:  "lv-not",
+	2147:  "lv-auth",
+	2148:  "veritas-ucl",
+	2149:  "acptsys",
+	2150:  "dynamic3d",
+	2151:  "docent",
+	2152:  "gtp-user",
+	2153:  "ctlptc",
+	2154:  "stdptc",
+	2155:  "brdptc",
+	2156:  "trp",
+	2157:  "xnds",
+	2158:  "touchnetplus",
+	2159:  "gdbremote",
+	2160:  "apc-2160",
+	2161:  "apc-2161",
+	2162:  "navisphere",
+	2163:  "navisphere-sec",
+	2164:  "ddns-v3",
+	2165:  "x-bone-api",
+	2166:  "iwserver",
+	2167:  "raw-serial",
+	2168:  "easy-soft-mux",
+	2169:  "brain",
+	2170:  "eyetv",
+	2171:  "msfw-storage",
+	2172:  "msfw-s-storage",
+	2173:  "msfw-replica",
+	2174:  "msfw-array",
+	2175:  "airsync",
+	2176:  "rapi",
+	2177:  "qwave",
+	2178:  "bitspeer",
+	2179:  "vmrdp",
+	2180:  "mc-gt-srv",
+	2181:  "eforward",
+	2182:  "cgn-stat",
+	2183:  "cgn-config",
+	2184:  "nvd",
+	2185:  "onbase-dds",
+	2186:  "gtaua",
+	2187:  "ssmd",
+	2190:  "tivoconnect",
+	2191:  "tvbus",
+	2192:  "asdis",
+	2193:  "drwcs",
+	2197:  "mnp-exchange",
+	2198:  "onehome-remote",
+	2199:  "onehome-help",
+	2200:  "ici",
+	2201:  "ats",
+	2202:  "imtc-map",
+	2203:  "b2-runtime",
+	2204:  "b2-license",
+	2205:  "jps",
+	2206:  "hpocbus",
+	2207:  "hpssd",
+	2208:  "hpiod",
+	2209:  "rimf-ps",
+	2210:  "noaaport",
+	2211:  "emwin",
+	2212:  "leecoposserver",
+	2213:  "kali",
+	2214:  "rpi",
+	2215:  "ipcore",
+	2216:  "vtu-comms",
+	2217:  "gotodevice",
+	2218:  "bounzza",
+	2219:  "netiq-ncap",
+	2220:  "netiq",
+	2221:  "ethernet-ip-s",
+	2222:  "EtherNet-IP-1",
+	2223:  "rockwell-csp2",
+	2224:  "efi-mg",
+	2226:  "di-drm",
+	2227:  "di-msg",
+	2228:  "ehome-ms",
+	2229:  "datalens",
+	2230:  "queueadm",
+	2231:  "wimaxasncp",
+	2232:  "ivs-video",
+	2233:  "infocrypt",
+	2234:  "directplay",
+	2235:  "sercomm-wlink",
+	2236:  "nani",
+	2237:  "optech-port1-lm",
+	2238:  "aviva-sna",
+	2239:  "imagequery",
+	2240:  "recipe",
+	2241:  "ivsd",
+	2242:  "foliocorp",
+	2243:  "magicom",
+	2244:  "nmsserver",
+	2245:  "hao",
+	2246:  "pc-mta-addrmap",
+	2247:  "antidotemgrsvr",
+	2248:  "ums",
+	2249:  "rfmp",
+	2250:  "remote-collab",
+	2251:  "dif-port",
+	2252:  "njenet-ssl",
+	2253:  "dtv-chan-req",
+	2254:  "seispoc",
+	2255:  "vrtp",
+	2256:  "pcc-mfp",
+	2257:  "simple-tx-rx",
+	2258:  "rcts",
+	2260:  "apc-2260",
+	2261:  "comotionmaster",
+	2262:  "comotionback",
+	2263:  "ecwcfg",
+	2264:  "apx500api-1",
+	2265:  "apx500api-2",
+	2266:  "mfserver",
+	2267:  "ontobroker",
+	2268:  "amt",
+	2269:  "mikey",
+	2270:  "starschool",
+	2271:  "mmcals",
+	2272:  "mmcal",
+	2273:  "mysql-im",
+	2274:  "pcttunnell",
+	2275:  "ibridge-data",
+	2276:  "ibridge-mgmt",
+	2277:  "bluectrlproxy",
+	2278:  "s3db",
+	2279:  "xmquery",
+	2280:  "lnvpoller",
+	2281:  "lnvconsole",
+	2282:  "lnvalarm",
+	2283:  "lnvstatus",
+	2284:  "lnvmaps",
+	2285:  "lnvmailmon",
+	2286:  "nas-metering",
+	2287:  "dna",
+	2288:  "netml",
+	2289:  "dict-lookup",
+	2290:  "sonus-logging",
+	2291:  "eapsp",
+	2292:  "mib-streaming",
+	2293:  "npdbgmngr",
+	2294:  "konshus-lm",
+	2295:  "advant-lm",
+	2296:  "theta-lm",
+	2297:  "d2k-datamover1",
+	2298:  "d2k-datamover2",
+	2299:  "pc-telecommute",
+	2300:  "cvmmon",
+	2301:  "cpq-wbem",
+	2302:  "binderysupport",
+	2303:  "proxy-gateway",
+	2304:  "attachmate-uts",
+	2305:  "mt-scaleserver",
+	2306:  "tappi-boxnet",
+	2307:  "pehelp",
+	2308:  "sdhelp",
+	2309:  "sdserver",
+	2310:  "sdclient",
+	2311:  "messageservice",
+	2312:  "wanscaler",
+	2313:  "iapp",
+	2314:  "cr-websystems",
+	2315:  "precise-sft",
+	2316:  "sent-lm",
+	2317:  "attachmate-g32",
+	2318:  "cadencecontrol",
+	2319:  "infolibria",
+	2320:  "siebel-ns",
+	2321:  "rdlap",
+	2322:  "ofsd",
+	2323:  "3d-nfsd",
+	2324:  "cosmocall",
+	2325:  "ansysli",
+	2326:  "idcp",
+	2327:  "xingcsm",
+	2328:  "netrix-sftm",
+	2329:  "nvd",
+	2330:  "tscchat",
+	2331:  "agentview",
+	2332:  "rcc-host",
+	2333:  "snapp",
+	2334:  "ace-client",
+	2335:  "ace-proxy",
+	2336:  "appleugcontrol",
+	2337:  "ideesrv",
+	2338:  "norton-lambert",
+	2339:  "3com-webview",
+	2340:  "wrs-registry",
+	2341:  "xiostatus",
+	2342:  "manage-exec",
+	2343:  "nati-logos",
+	2344:  "fcmsys",
+	2345:  "dbm",
+	2346:  "redstorm-join",
+	2347:  "redstorm-find",
+	2348:  "redstorm-info",
+	2349:  "redstorm-diag",
+	2350:  "psbserver",
+	2351:  "psrserver",
+	2352:  "pslserver",
+	2353:  "pspserver",
+	2354:  "psprserver",
+	2355:  "psdbserver",
+	2356:  "gxtelmd",
+	2357:  "unihub-server",
+	2358:  "futrix",
+	2359:  "flukeserver",
+	2360:  "nexstorindltd",
+	2361:  "tl1",
+	2362:  "digiman",
+	2363:  "mediacntrlnfsd",
+	2364:  "oi-2000",
+	2365:  "dbref",
+	2366:  "qip-login",
+	2367:  "service-ctrl",
+	2368:  "opentable",
+	2370:  "l3-hbmon",
+	2372:  "lanmessenger",
+	2381:  "compaq-https",
+	2382:  "ms-olap3",
+	2383:  "ms-olap4",
+	2384:  "sd-capacity",
+	2385:  "sd-data",
+	2386:  "virtualtape",
+	2387:  "vsamredirector",
+	2388:  "mynahautostart",
+	2389:  "ovsessionmgr",
+	2390:  "rsmtp",
+	2391:  "3com-net-mgmt",
+	2392:  "tacticalauth",
+	2393:  "ms-olap1",
+	2394:  "ms-olap2",
+	2395:  "lan900-remote",
+	2396:  "wusage",
+	2397:  "ncl",
+	2398:  "orbiter",
+	2399:  "fmpro-fdal",
+	2400:  "opequus-server",
+	2401:  "cvspserver",
+	2402:  "taskmaster2000",
+	2403:  "taskmaster2000",
+	2404:  "iec-104",
+	2405:  "trc-netpoll",
+	2406:  "jediserver",
+	2407:  "orion",
+	2409:  "sns-protocol",
+	2410:  "vrts-registry",
+	2411:  "netwave-ap-mgmt",
+	2412:  "cdn",
+	2413:  "orion-rmi-reg",
+	2414:  "beeyond",
+	2415:  "codima-rtp",
+	2416:  "rmtserver",
+	2417:  "composit-server",
+	2418:  "cas",
+	2419:  "attachmate-s2s",
+	2420:  "dslremote-mgmt",
+	2421:  "g-talk",
+	2422:  "crmsbits",
+	2423:  "rnrp",
+	2424:  "kofax-svr",
+	2425:  "fjitsuappmgr",
+	2426:  "vcmp",
+	2427:  "mgcp-gateway",
+	2428:  "ott",
+	2429:  "ft-role",
+	2430:  "venus",
+	2431:  "venus-se",
+	2432:  "codasrv",
+	2433:  "codasrv-se",
+	2434:  "pxc-epmap",
+	2435:  "optilogic",
+	2436:  "topx",
+	2437:  "unicontrol",
+	2438:  "msp",
+	2439:  "sybasedbsynch",
+	2440:  "spearway",
+	2441:  "pvsw-inet",
+	2442:  "netangel",
+	2443:  "powerclientcsf",
+	2444:  "btpp2sectrans",
+	2445:  "dtn1",
+	2446:  "bues-service",
+	2447:  "ovwdb",
+	2448:  "hpppssvr",
+	2449:  "ratl",
+	2450:  "netadmin",
+	2451:  "netchat",
+	2452:  "snifferclient",
+	2453:  "madge-ltd",
+	2454:  "indx-dds",
+	2455:  "wago-io-system",
+	2456:  "altav-remmgt",
+	2457:  "rapido-ip",
+	2458:  "griffin",
+	2459:  "community",
+	2460:  "ms-theater",
+	2461:  "qadmifoper",
+	2462:  "qadmifevent",
+	2463:  "lsi-raid-mgmt",
+	2464:  "direcpc-si",
+	2465:  "lbm",
+	2466:  "lbf",
+	2467:  "high-criteria",
+	2468:  "qip-msgd",
+	2469:  "mti-tcs-comm",
+	2470:  "taskman-port",
+	2471:  "seaodbc",
+	2472:  "c3",
+	2473:  "aker-cdp",
+	2474:  "vitalanalysis",
+	2475:  "ace-server",
+	2476:  "ace-svr-prop",
+	2477:  "ssm-cvs",
+	2478:  "ssm-cssps",
+	2479:  "ssm-els",
+	2480:  "powerexchange",
+	2481:  "giop",
+	2482:  "giop-ssl",
+	2483:  "ttc",
+	2484:  "ttc-ssl",
+	2485:  "netobjects1",
+	2486:  "netobjects2",
+	2487:  "pns",
+	2488:  "moy-corp",
+	2489:  "tsilb",
+	2490:  "qip-qdhcp",
+	2491:  "conclave-cpp",
+	2492:  "groove",
+	2493:  "talarian-mqs",
+	2494:  "bmc-ar",
+	2495:  "fast-rem-serv",
+	2496:  "dirgis",
+	2497:  "quaddb",
+	2498:  "odn-castraq",
+	2499:  "unicontrol",
+	2500:  "rtsserv",
+	2501:  "rtsclient",
+	2502:  "kentrox-prot",
+	2503:  "nms-dpnss",
+	2504:  "wlbs",
+	2505:  "ppcontrol",
+	2506:  "jbroker",
+	2507:  "spock",
+	2508:  "jdatastore",
+	2509:  "fjmpss",
+	2510:  "fjappmgrbulk",
+	2511:  "metastorm",
+	2512:  "citrixima",
+	2513:  "citrixadmin",
+	2514:  "facsys-ntp",
+	2515:  "facsys-router",
+	2516:  "maincontrol",
+	2517:  "call-sig-trans",
+	2518:  "willy",
+	2519:  "globmsgsvc",
+	2520:  "pvsw",
+	2521:  "adaptecmgr",
+	2522:  "windb",
+	2523:  "qke-llc-v3",
+	2524:  "optiwave-lm",
+	2525:  "ms-v-worlds",
+	2526:  "ema-sent-lm",
+	2527:  "iqserver",
+	2528:  "ncr-ccl",
+	2529:  "utsftp",
+	2530:  "vrcommerce",
+	2531:  "ito-e-gui",
+	2532:  "ovtopmd",
+	2533:  "snifferserver",
+	2534:  "combox-web-acc",
+	2535:  "madcap",
+	2536:  "btpp2audctr1",
+	2537:  "upgrade",
+	2538:  "vnwk-prapi",
+	2539:  "vsiadmin",
+	2540:  "lonworks",
+	2541:  "lonworks2",
+	2542:  "udrawgraph",
+	2543:  "reftek",
+	2544:  "novell-zen",
+	2545:  "sis-emt",
+	2546:  "vytalvaultbrtp",
+	2547:  "vytalvaultvsmp",
+	2548:  "vytalvaultpipe",
+	2549:  "ipass",
+	2550:  "ads",
+	2551:  "isg-uda-server",
+	2552:  "call-logging",
+	2553:  "efidiningport",
+	2554:  "vcnet-link-v10",
+	2555:  "compaq-wcp",
+	2556:  "nicetec-nmsvc",
+	2557:  "nicetec-mgmt",
+	2558:  "pclemultimedia",
+	2559:  "lstp",
+	2560:  "labrat",
+	2561:  "mosaixcc",
+	2562:  "delibo",
+	2563:  "cti-redwood",
+	2564:  "hp-3000-telnet",
+	2565:  "coord-svr",
+	2566:  "pcs-pcw",
+	2567:  "clp",
+	2568:  "spamtrap",
+	2569:  "sonuscallsig",
+	2570:  "hs-port",
+	2571:  "cecsvc",
+	2572:  "ibp",
+	2573:  "trustestablish",
+	2574:  "blockade-bpsp",
+	2575:  "hl7",
+	2576:  "tclprodebugger",
+	2577:  "scipticslsrvr",
+	2578:  "rvs-isdn-dcp",
+	2579:  "mpfoncl",
+	2580:  "tributary",
+	2581:  "argis-te",
+	2582:  "argis-ds",
+	2583:  "mon",
+	2584:  "cyaserv",
+	2585:  "netx-server",
+	2586:  "netx-agent",
+	2587:  "masc",
+	2588:  "privilege",
+	2589:  "quartus-tcl",
+	2590:  "idotdist",
+	2591:  "maytagshuffle",
+	2592:  "netrek",
+	2593:  "mns-mail",
+	2594:  "dts",
+	2595:  "worldfusion1",
+	2596:  "worldfusion2",
+	2597:  "homesteadglory",
+	2598:  "citriximaclient",
+	2599:  "snapd",
+	2600:  "hpstgmgr",
+	2601:  "discp-client",
+	2602:  "discp-server",
+	2603:  "servicemeter",
+	2604:  "nsc-ccs",
+	2605:  "nsc-posa",
+	2606:  "netmon",
+	2607:  "connection",
+	2608:  "wag-service",
+	2609:  "system-monitor",
+	2610:  "versa-tek",
+	2611:  "lionhead",
+	2612:  "qpasa-agent",
+	2613:  "smntubootstrap",
+	2614:  "neveroffline",
+	2615:  "firepower",
+	2616:  "appswitch-emp",
+	2617:  "cmadmin",
+	2618:  "priority-e-com",
+	2619:  "bruce",
+	2620:  "lpsrecommender",
+	2621:  "miles-apart",
+	2622:  "metricadbc",
+	2623:  "lmdp",
+	2624:  "aria",
+	2625:  "blwnkl-port",
+	2626:  "gbjd816",
+	2627:  "moshebeeri",
+	2628:  "dict",
+	2629:  "sitaraserver",
+	2630:  "sitaramgmt",
+	2631:  "sitaradir",
+	2632:  "irdg-post",
+	2633:  "interintelli",
+	2634:  "pk-electronics",
+	2635:  "backburner",
+	2636:  "solve",
+	2637:  "imdocsvc",
+	2638:  "sybaseanywhere",
+	2639:  "aminet",
+	2640:  "ami-control",
+	2641:  "hdl-srv",
+	2642:  "tragic",
+	2643:  "gte-samp",
+	2644:  "travsoft-ipx-t",
+	2645:  "novell-ipx-cmd",
+	2646:  "and-lm",
+	2647:  "syncserver",
+	2648:  "upsnotifyprot",
+	2649:  "vpsipport",
+	2650:  "eristwoguns",
+	2651:  "ebinsite",
+	2652:  "interpathpanel",
+	2653:  "sonus",
+	2654:  "corel-vncadmin",
+	2655:  "unglue",
+	2656:  "kana",
+	2657:  "sns-dispatcher",
+	2658:  "sns-admin",
+	2659:  "sns-query",
+	2660:  "gcmonitor",
+	2661:  "olhost",
+	2662:  "bintec-capi",
+	2663:  "bintec-tapi",
+	2664:  "patrol-mq-gm",
+	2665:  "patrol-mq-nm",
+	2666:  "extensis",
+	2667:  "alarm-clock-s",
+	2668:  "alarm-clock-c",
+	2669:  "toad",
+	2670:  "tve-announce",
+	2671:  "newlixreg",
+	2672:  "nhserver",
+	2673:  "firstcall42",
+	2674:  "ewnn",
+	2675:  "ttc-etap",
+	2676:  "simslink",
+	2677:  "gadgetgate1way",
+	2678:  "gadgetgate2way",
+	2679:  "syncserverssl",
+	2680:  "pxc-sapxom",
+	2681:  "mpnjsomb",
+	2683:  "ncdloadbalance",
+	2684:  "mpnjsosv",
+	2685:  "mpnjsocl",
+	2686:  "mpnjsomg",
+	2687:  "pq-lic-mgmt",
+	2688:  "md-cg-http",
+	2689:  "fastlynx",
+	2690:  "hp-nnm-data",
+	2691:  "itinternet",
+	2692:  "admins-lms",
+	2694:  "pwrsevent",
+	2695:  "vspread",
+	2696:  "unifyadmin",
+	2697:  "oce-snmp-trap",
+	2698:  "mck-ivpip",
+	2699:  "csoft-plusclnt",
+	2700:  "tqdata",
+	2701:  "sms-rcinfo",
+	2702:  "sms-xfer",
+	2703:  "sms-chat",
+	2704:  "sms-remctrl",
+	2705:  "sds-admin",
+	2706:  "ncdmirroring",
+	2707:  "emcsymapiport",
+	2708:  "banyan-net",
+	2709:  "supermon",
+	2710:  "sso-service",
+	2711:  "sso-control",
+	2712:  "aocp",
+	2713:  "raventbs",
+	2714:  "raventdm",
+	2715:  "hpstgmgr2",
+	2716:  "inova-ip-disco",
+	2717:  "pn-requester",
+	2718:  "pn-requester2",
+	2719:  "scan-change",
+	2720:  "wkars",
+	2721:  "smart-diagnose",
+	2722:  "proactivesrvr",
+	2723:  "watchdog-nt",
+	2724:  "qotps",
+	2725:  "msolap-ptp2",
+	2726:  "tams",
+	2727:  "mgcp-callagent",
+	2728:  "sqdr",
+	2729:  "tcim-control",
+	2730:  "nec-raidplus",
+	2731:  "fyre-messanger",
+	2732:  "g5m",
+	2733:  "signet-ctf",
+	2734:  "ccs-software",
+	2735:  "netiq-mc",
+	2736:  "radwiz-nms-srv",
+	2737:  "srp-feedback",
+	2738:  "ndl-tcp-ois-gw",
+	2739:  "tn-timing",
+	2740:  "alarm",
+	2741:  "tsb",
+	2742:  "tsb2",
+	2743:  "murx",
+	2744:  "honyaku",
+	2745:  "urbisnet",
+	2746:  "cpudpencap",
+	2747:  "fjippol-swrly",
+	2748:  "fjippol-polsvr",
+	2749:  "fjippol-cnsl",
+	2750:  "fjippol-port1",
+	2751:  "fjippol-port2",
+	2752:  "rsisysaccess",
+	2753:  "de-spot",
+	2754:  "apollo-cc",
+	2755:  "expresspay",
+	2756:  "simplement-tie",
+	2757:  "cnrp",
+	2758:  "apollo-status",
+	2759:  "apollo-gms",
+	2760:  "sabams",
+	2761:  "dicom-iscl",
+	2762:  "dicom-tls",
+	2763:  "desktop-dna",
+	2764:  "data-insurance",
+	2765:  "qip-audup",
+	2766:  "compaq-scp",
+	2767:  "uadtc",
+	2768:  "uacs",
+	2769:  "exce",
+	2770:  "veronica",
+	2771:  "vergencecm",
+	2772:  "auris",
+	2773:  "rbakcup1",
+	2774:  "rbakcup2",
+	2775:  "smpp",
+	2776:  "ridgeway1",
+	2777:  "ridgeway2",
+	2778:  "gwen-sonya",
+	2779:  "lbc-sync",
+	2780:  "lbc-control",
+	2781:  "whosells",
+	2782:  "everydayrc",
+	2783:  "aises",
+	2784:  "www-dev",
+	2785:  "aic-np",
+	2786:  "aic-oncrpc",
+	2787:  "piccolo",
+	2788:  "fryeserv",
+	2789:  "media-agent",
+	2790:  "plgproxy",
+	2791:  "mtport-regist",
+	2792:  "f5-globalsite",
+	2793:  "initlsmsad",
+	2795:  "livestats",
+	2796:  "ac-tech",
+	2797:  "esp-encap",
+	2798:  "tmesis-upshot",
+	2799:  "icon-discover",
+	2800:  "acc-raid",
+	2801:  "igcp",
+	2802:  "veritas-udp1",
+	2803:  "btprjctrl",
+	2804:  "dvr-esm",
+	2805:  "wta-wsp-s",
+	2806:  "cspuni",
+	2807:  "cspmulti",
+	2808:  "j-lan-p",
+	2809:  "corbaloc",
+	2810:  "netsteward",
+	2811:  "gsiftp",
+	2812:  "atmtcp",
+	2813:  "llm-pass",
+	2814:  "llm-csv",
+	2815:  "lbc-measure",
+	2816:  "lbc-watchdog",
+	2817:  "nmsigport",
+	2818:  "rmlnk",
+	2819:  "fc-faultnotify",
+	2820:  "univision",
+	2821:  "vrts-at-port",
+	2822:  "ka0wuc",
+	2823:  "cqg-netlan",
+	2824:  "cqg-netlan-1",
+	2826:  "slc-systemlog",
+	2827:  "slc-ctrlrloops",
+	2828:  "itm-lm",
+	2829:  "silkp1",
+	2830:  "silkp2",
+	2831:  "silkp3",
+	2832:  "silkp4",
+	2833:  "glishd",
+	2834:  "evtp",
+	2835:  "evtp-data",
+	2836:  "catalyst",
+	2837:  "repliweb",
+	2838:  "starbot",
+	2839:  "nmsigport",
+	2840:  "l3-exprt",
+	2841:  "l3-ranger",
+	2842:  "l3-hawk",
+	2843:  "pdnet",
+	2844:  "bpcp-poll",
+	2845:  "bpcp-trap",
+	2846:  "aimpp-hello",
+	2847:  "aimpp-port-req",
+	2848:  "amt-blc-port",
+	2849:  "fxp",
+	2850:  "metaconsole",
+	2851:  "webemshttp",
+	2852:  "bears-01",
+	2853:  "ispipes",
+	2854:  "infomover",
+	2856:  "cesdinv",
+	2857:  "simctlp",
+	2858:  "ecnp",
+	2859:  "activememory",
+	2860:  "dialpad-voice1",
+	2861:  "dialpad-voice2",
+	2862:  "ttg-protocol",
+	2863:  "sonardata",
+	2864:  "astromed-main",
+	2865:  "pit-vpn",
+	2866:  "iwlistener",
+	2867:  "esps-portal",
+	2868:  "npep-messaging",
+	2869:  "icslap",
+	2870:  "daishi",
+	2871:  "msi-selectplay",
+	2872:  "radix",
+	2874:  "dxmessagebase1",
+	2875:  "dxmessagebase2",
+	2876:  "sps-tunnel",
+	2877:  "bluelance",
+	2878:  "aap",
+	2879:  "ucentric-ds",
+	2880:  "synapse",
+	2881:  "ndsp",
+	2882:  "ndtp",
+	2883:  "ndnp",
+	2884:  "flashmsg",
+	2885:  "topflow",
+	2886:  "responselogic",
+	2887:  "aironetddp",
+	2888:  "spcsdlobby",
+	2889:  "rsom",
+	2890:  "cspclmulti",
+	2891:  "cinegrfx-elmd",
+	2892:  "snifferdata",
+	2893:  "vseconnector",
+	2894:  "abacus-remote",
+	2895:  "natuslink",
+	2896:  "ecovisiong6-1",
+	2897:  "citrix-rtmp",
+	2898:  "appliance-cfg",
+	2899:  "powergemplus",
+	2900:  "quicksuite",
+	2901:  "allstorcns",
+	2902:  "netaspi",
+	2903:  "suitcase",
+	2904:  "m2ua",
+	2906:  "caller9",
+	2907:  "webmethods-b2b",
+	2908:  "mao",
+	2909:  "funk-dialout",
+	2910:  "tdaccess",
+	2911:  "blockade",
+	2912:  "epicon",
+	2913:  "boosterware",
+	2914:  "gamelobby",
+	2915:  "tksocket",
+	2916:  "elvin-server",
+	2917:  "elvin-client",
+	2918:  "kastenchasepad",
+	2919:  "roboer",
+	2920:  "roboeda",
+	2921:  "cesdcdman",
+	2922:  "cesdcdtrn",
+	2923:  "wta-wsp-wtp-s",
+	2924:  "precise-vip",
+	2926:  "mobile-file-dl",
+	2927:  "unimobilectrl",
+	2928:  "redstone-cpss",
+	2929:  "amx-webadmin",
+	2930:  "amx-weblinx",
+	2931:  "circle-x",
+	2932:  "incp",
+	2933:  "4-tieropmgw",
+	2934:  "4-tieropmcli",
+	2935:  "qtp",
+	2936:  "otpatch",
+	2937:  "pnaconsult-lm",
+	2938:  "sm-pas-1",
+	2939:  "sm-pas-2",
+	2940:  "sm-pas-3",
+	2941:  "sm-pas-4",
+	2942:  "sm-pas-5",
+	2943:  "ttnrepository",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	2946:  "fjsvmpor",
+	2947:  "gpsd",
+	2948:  "wap-push",
+	2949:  "wap-pushsecure",
+	2950:  "esip",
+	2951:  "ottp",
+	2952:  "mpfwsas",
+	2953:  "ovalarmsrv",
+	2954:  "ovalarmsrv-cmd",
+	2955:  "csnotify",
+	2956:  "ovrimosdbman",
+	2957:  "jmact5",
+	2958:  "jmact6",
+	2959:  "rmopagt",
+	2960:  "dfoxserver",
+	2961:  "boldsoft-lm",
+	2962:  "iph-policy-cli",
+	2963:  "iph-policy-adm",
+	2964:  "bullant-srap",
+	2965:  "bullant-rap",
+	2966:  "idp-infotrieve",
+	2967:  "ssc-agent",
+	2968:  "enpp",
+	2969:  "essp",
+	2970:  "index-net",
+	2971:  "netclip",
+	2972:  "pmsm-webrctl",
+	2973:  "svnetworks",
+	2974:  "signal",
+	2975:  "fjmpcm",
+	2976:  "cns-srv-port",
+	2977:  "ttc-etap-ns",
+	2978:  "ttc-etap-ds",
+	2979:  "h263-video",
+	2980:  "wimd",
+	2981:  "mylxamport",
+	2982:  "iwb-whiteboard",
+	2983:  "netplan",
+	2984:  "hpidsadmin",
+	2985:  "hpidsagent",
+	2986:  "stonefalls",
+	2987:  "identify",
+	2988:  "hippad",
+	2989:  "zarkov",
+	2990:  "boscap",
+	2991:  "wkstn-mon",
+	2992:  "avenyo",
+	2993:  "veritas-vis1",
+	2994:  "veritas-vis2",
+	2995:  "idrs",
+	2996:  "vsixml",
+	2997:  "rebol",
+	2998:  "realsecure",
+	2999:  "remoteware-un",
+	3000:  "hbci",
+	3002:  "exlm-agent",
+	3003:  "cgms",
+	3004:  "csoftragent",
+	3005:  "geniuslm",
+	3006:  "ii-admin",
+	3007:  "lotusmtap",
+	3008:  "midnight-tech",
+	3009:  "pxc-ntfy",
+	3010:  "ping-pong",
+	3011:  "trusted-web",
+	3012:  "twsdss",
+	3013:  "gilatskysurfer",
+	3014:  "broker-service",
+	3015:  "nati-dstp",
+	3016:  "notify-srvr",
+	3017:  "event-listener",
+	3018:  "srvc-registry",
+	3019:  "resource-mgr",
+	3020:  "cifs",
+	3021:  "agriserver",
+	3022:  "csregagent",
+	3023:  "magicnotes",
+	3024:  "nds-sso",
+	3025:  "arepa-raft",
+	3026:  "agri-gateway",
+	3027:  "LiebDevMgmt-C",
+	3028:  "LiebDevMgmt-DM",
+	3029:  "LiebDevMgmt-A",
+	3030:  "arepa-cas",
+	3031:  "eppc",
+	3032:  "redwood-chat",
+	3033:  "pdb",
+	3034:  "osmosis-aeea",
+	3035:  "fjsv-gssagt",
+	3036:  "hagel-dump",
+	3037:  "hp-san-mgmt",
+	3038:  "santak-ups",
+	3039:  "cogitate",
+	3040:  "tomato-springs",
+	3041:  "di-traceware",
+	3042:  "journee",
+	3043:  "brp",
+	3044:  "epp",
+	3045:  "responsenet",
+	3046:  "di-ase",
+	3047:  "hlserver",
+	3048:  "pctrader",
+	3049:  "nsws",
+	3050:  "gds-db",
+	3051:  "galaxy-server",
+	3052:  "apc-3052",
+	3053:  "dsom-server",
+	3054:  "amt-cnf-prot",
+	3055:  "policyserver",
+	3056:  "cdl-server",
+	3057:  "goahead-fldup",
+	3058:  "videobeans",
+	3059:  "qsoft",
+	3060:  "interserver",
+	3061:  "cautcpd",
+	3062:  "ncacn-ip-tcp",
+	3063:  "ncadg-ip-udp",
+	3064:  "rprt",
+	3065:  "slinterbase",
+	3066:  "netattachsdmp",
+	3067:  "fjhpjp",
+	3068:  "ls3bcast",
+	3069:  "ls3",
+	3070:  "mgxswitch",
+	3072:  "csd-monitor",
+	3073:  "vcrp",
+	3074:  "xbox",
+	3075:  "orbix-locator",
+	3076:  "orbix-config",
+	3077:  "orbix-loc-ssl",
+	3078:  "orbix-cfg-ssl",
+	3079:  "lv-frontpanel",
+	3080:  "stm-pproc",
+	3081:  "tl1-lv",
+	3082:  "tl1-raw",
+	3083:  "tl1-telnet",
+	3084:  "itm-mccs",
+	3085:  "pcihreq",
+	3086:  "jdl-dbkitchen",
+	3087:  "asoki-sma",
+	3088:  "xdtp",
+	3089:  "ptk-alink",
+	3090:  "stss",
+	3091:  "1ci-smcs",
+	3093:  "rapidmq-center",
+	3094:  "rapidmq-reg",
+	3095:  "panasas",
+	3096:  "ndl-aps",
+	3098:  "umm-port",
+	3099:  "chmd",
+	3100:  "opcon-xps",
+	3101:  "hp-pxpib",
+	3102:  "slslavemon",
+	3103:  "autocuesmi",
+	3104:  "autocuetime",
+	3105:  "cardbox",
+	3106:  "cardbox-http",
+	3107:  "business",
+	3108:  "geolocate",
+	3109:  "personnel",
+	3110:  "sim-control",
+	3111:  "wsynch",
+	3112:  "ksysguard",
+	3113:  "cs-auth-svr",
+	3114:  "ccmad",
+	3115:  "mctet-master",
+	3116:  "mctet-gateway",
+	3117:  "mctet-jserv",
+	3118:  "pkagent",
+	3119:  "d2000kernel",
+	3120:  "d2000webserver",
+	3122:  "vtr-emulator",
+	3123:  "edix",
+	3124:  "beacon-port",
+	3125:  "a13-an",
+	3127:  "ctx-bridge",
+	3128:  "ndl-aas",
+	3129:  "netport-id",
+	3130:  "icpv2",
+	3131:  "netbookmark",
+	3132:  "ms-rule-engine",
+	3133:  "prism-deploy",
+	3134:  "ecp",
+	3135:  "peerbook-port",
+	3136:  "grubd",
+	3137:  "rtnt-1",
+	3138:  "rtnt-2",
+	3139:  "incognitorv",
+	3140:  "ariliamulti",
+	3141:  "vmodem",
+	3142:  "rdc-wh-eos",
+	3143:  "seaview",
+	3144:  "tarantella",
+	3145:  "csi-lfap",
+	3146:  "bears-02",
+	3147:  "rfio",
+	3148:  "nm-game-admin",
+	3149:  "nm-game-server",
+	3150:  "nm-asses-admin",
+	3151:  "nm-assessor",
+	3152:  "feitianrockey",
+	3153:  "s8-client-port",
+	3154:  "ccmrmi",
+	3155:  "jpegmpeg",
+	3156:  "indura",
+	3157:  "e3consultants",
+	3158:  "stvp",
+	3159:  "navegaweb-port",
+	3160:  "tip-app-server",
+	3161:  "doc1lm",
+	3162:  "sflm",
+	3163:  "res-sap",
+	3164:  "imprs",
+	3165:  "newgenpay",
+	3166:  "sossecollector",
+	3167:  "nowcontact",
+	3168:  "poweronnud",
+	3169:  "serverview-as",
+	3170:  "serverview-asn",
+	3171:  "serverview-gf",
+	3172:  "serverview-rm",
+	3173:  "serverview-icc",
+	3174:  "armi-server",
+	3175:  "t1-e1-over-ip",
+	3176:  "ars-master",
+	3177:  "phonex-port",
+	3178:  "radclientport",
+	3179:  "h2gf-w-2m",
+	3180:  "mc-brk-srv",
+	3181:  "bmcpatrolagent",
+	3182:  "bmcpatrolrnvu",
+	3183:  "cops-tls",
+	3184:  "apogeex-port",
+	3185:  "smpppd",
+	3186:  "iiw-port",
+	3187:  "odi-port",
+	3188:  "brcm-comm-port",
+	3189:  "pcle-infex",
+	3190:  "csvr-proxy",
+	3191:  "csvr-sslproxy",
+	3192:  "firemonrcc",
+	3193:  "spandataport",
+	3194:  "magbind",
+	3195:  "ncu-1",
+	3196:  "ncu-2",
+	3197:  "embrace-dp-s",
+	3198:  "embrace-dp-c",
+	3199:  "dmod-workspace",
+	3200:  "tick-port",
+	3201:  "cpq-tasksmart",
+	3202:  "intraintra",
+	3203:  "netwatcher-mon",
+	3204:  "netwatcher-db",
+	3205:  "isns",
+	3206:  "ironmail",
+	3207:  "vx-auth-port",
+	3208:  "pfu-prcallback",
+	3209:  "netwkpathengine",
+	3210:  "flamenco-proxy",
+	3211:  "avsecuremgmt",
+	3212:  "surveyinst",
+	3213:  "neon24x7",
+	3214:  "jmq-daemon-1",
+	3215:  "jmq-daemon-2",
+	3216:  "ferrari-foam",
+	3217:  "unite",
+	3218:  "smartpackets",
+	3219:  "wms-messenger",
+	3220:  "xnm-ssl",
+	3221:  "xnm-clear-text",
+	3222:  "glbp",
+	3223:  "digivote",
+	3224:  "aes-discovery",
+	3225:  "fcip-port",
+	3226:  "isi-irp",
+	3227:  "dwnmshttp",
+	3228:  "dwmsgserver",
+	3229:  "global-cd-port",
+	3230:  "sftdst-port",
+	3231:  "vidigo",
+	3232:  "mdtp",
+	3233:  "whisker",
+	3234:  "alchemy",
+	3235:  "mdap-port",
+	3236:  "apparenet-ts",
+	3237:  "apparenet-tps",
+	3238:  "apparenet-as",
+	3239:  "apparenet-ui",
+	3240:  "triomotion",
+	3241:  "sysorb",
+	3242:  "sdp-id-port",
+	3243:  "timelot",
+	3244:  "onesaf",
+	3245:  "vieo-fe",
+	3246:  "dvt-system",
+	3247:  "dvt-data",
+	3248:  "procos-lm",
+	3249:  "ssp",
+	3250:  "hicp",
+	3251:  "sysscanner",
+	3252:  "dhe",
+	3253:  "pda-data",
+	3254:  "pda-sys",
+	3255:  "semaphore",
+	3256:  "cpqrpm-agent",
+	3257:  "cpqrpm-server",
+	3258:  "ivecon-port",
+	3259:  "epncdp2",
+	3260:  "iscsi-target",
+	3261:  "winshadow",
+	3262:  "necp",
+	3263:  "ecolor-imager",
+	3264:  "ccmail",
+	3265:  "altav-tunnel",
+	3266:  "ns-cfg-server",
+	3267:  "ibm-dial-out",
+	3268:  "msft-gc",
+	3269:  "msft-gc-ssl",
+	3270:  "verismart",
+	3271:  "csoft-prev",
+	3272:  "user-manager",
+	3273:  "sxmp",
+	3274:  "ordinox-server",
+	3275:  "samd",
+	3276:  "maxim-asics",
+	3277:  "awg-proxy",
+	3278:  "lkcmserver",
+	3279:  "admind",
+	3280:  "vs-server",
+	3281:  "sysopt",
+	3282:  "datusorb",
+	3283:  "Apple Remote Desktop (Net Assistant)",
+	3284:  "4talk",
+	3285:  "plato",
+	3286:  "e-net",
+	3287:  "directvdata",
+	3288:  "cops",
+	3289:  "enpc",
+	3290:  "caps-lm",
+	3291:  "sah-lm",
+	3292:  "cart-o-rama",
+	3293:  "fg-fps",
+	3294:  "fg-gip",
+	3295:  "dyniplookup",
+	3296:  "rib-slm",
+	3297:  "cytel-lm",
+	3298:  "deskview",
+	3299:  "pdrncs",
+	3302:  "mcs-fastmail",
+	3303:  "opsession-clnt",
+	3304:  "opsession-srvr",
+	3305:  "odette-ftp",
+	3306:  "mysql",
+	3307:  "opsession-prxy",
+	3308:  "tns-server",
+	3309:  "tns-adv",
+	3310:  "dyna-access",
+	3311:  "mcns-tel-ret",
+	3312:  "appman-server",
+	3313:  "uorb",
+	3314:  "uohost",
+	3315:  "cdid",
+	3316:  "aicc-cmi",
+	3317:  "vsaiport",
+	3318:  "ssrip",
+	3319:  "sdt-lmd",
+	3320:  "officelink2000",
+	3321:  "vnsstr",
+	3326:  "sftu",
+	3327:  "bbars",
+	3328:  "egptlm",
+	3329:  "hp-device-disc",
+	3330:  "mcs-calypsoicf",
+	3331:  "mcs-messaging",
+	3332:  "mcs-mailsvr",
+	3333:  "dec-notes",
+	3334:  "directv-web",
+	3335:  "directv-soft",
+	3336:  "directv-tick",
+	3337:  "directv-catlg",
+	3338:  "anet-b",
+	3339:  "anet-l",
+	3340:  "anet-m",
+	3341:  "anet-h",
+	3342:  "webtie",
+	3343:  "ms-cluster-net",
+	3344:  "bnt-manager",
+	3345:  "influence",
+	3346:  "trnsprntproxy",
+	3347:  "phoenix-rpc",
+	3348:  "pangolin-laser",
+	3349:  "chevinservices",
+	3350:  "findviatv",
+	3351:  "btrieve",
+	3352:  "ssql",
+	3353:  "fatpipe",
+	3354:  "suitjd",
+	3355:  "ordinox-dbase",
+	3356:  "upnotifyps",
+	3357:  "adtech-test",
+	3358:  "mpsysrmsvr",
+	3359:  "wg-netforce",
+	3360:  "kv-server",
+	3361:  "kv-agent",
+	3362:  "dj-ilm",
+	3363:  "nati-vi-server",
+	3364:  "creativeserver",
+	3365:  "contentserver",
+	3366:  "creativepartnr",
+	3372:  "tip2",
+	3373:  "lavenir-lm",
+	3374:  "cluster-disc",
+	3375:  "vsnm-agent",
+	3376:  "cdbroker",
+	3377:  "cogsys-lm",
+	3378:  "wsicopy",
+	3379:  "socorfs",
+	3380:  "sns-channels",
+	3381:  "geneous",
+	3382:  "fujitsu-neat",
+	3383:  "esp-lm",
+	3384:  "hp-clic",
+	3385:  "qnxnetman",
+	3386:  "gprs-sig",
+	3387:  "backroomnet",
+	3388:  "cbserver",
+	3389:  "ms-wbt-server",
+	3390:  "dsc",
+	3391:  "savant",
+	3392:  "efi-lm",
+	3393:  "d2k-tapestry1",
+	3394:  "d2k-tapestry2",
+	3395:  "dyna-lm",
+	3396:  "printer-agent",
+	3397:  "cloanto-lm",
+	3398:  "mercantile",
+	3399:  "csms",
+	3400:  "csms2",
+	3401:  "filecast",
+	3402:  "fxaengine-net",
+	3405:  "nokia-ann-ch1",
+	3406:  "nokia-ann-ch2",
+	3407:  "ldap-admin",
+	3408:  "BESApi",
+	3409:  "networklens",
+	3410:  "networklenss",
+	3411:  "biolink-auth",
+	3412:  "xmlblaster",
+	3413:  "svnet",
+	3414:  "wip-port",
+	3415:  "bcinameservice",
+	3416:  "commandport",
+	3417:  "csvr",
+	3418:  "rnmap",
+	3419:  "softaudit",
+	3420:  "ifcp-port",
+	3421:  "bmap",
+	3422:  "rusb-sys-port",
+	3423:  "xtrm",
+	3424:  "xtrms",
+	3425:  "agps-port",
+	3426:  "arkivio",
+	3427:  "websphere-snmp",
+	3428:  "twcss",
+	3429:  "gcsp",
+	3430:  "ssdispatch",
+	3431:  "ndl-als",
+	3432:  "osdcp",
+	3433:  "opnet-smp",
+	3434:  "opencm",
+	3435:  "pacom",
+	3436:  "gc-config",
+	3437:  "autocueds",
+	3438:  "spiral-admin",
+	3439:  "hri-port",
+	3440:  "ans-console",
+	3441:  "connect-client",
+	3442:  "connect-server",
+	3443:  "ov-nnm-websrv",
+	3444:  "denali-server",
+	3445:  "monp",
+	3446:  "3comfaxrpc",
+	3447:  "directnet",
+	3448:  "dnc-port",
+	3449:  "hotu-chat",
+	3450:  "castorproxy",
+	3451:  "asam",
+	3452:  "sabp-signal",
+	3453:  "pscupd",
+	3454:  "mira",
+	3455:  "prsvp",
+	3456:  "vat",
+	3457:  "vat-control",
+	3458:  "d3winosfi",
+	3459:  "integral",
+	3460:  "edm-manager",
+	3461:  "edm-stager",
+	3462:  "edm-std-notify",
+	3463:  "edm-adm-notify",
+	3464:  "edm-mgr-sync",
+	3465:  "edm-mgr-cntrl",
+	3466:  "workflow",
+	3467:  "rcst",
+	3468:  "ttcmremotectrl",
+	3469:  "pluribus",
+	3470:  "jt400",
+	3471:  "jt400-ssl",
+	3472:  "jaugsremotec-1",
+	3473:  "jaugsremotec-2",
+	3474:  "ttntspauto",
+	3475:  "genisar-port",
+	3476:  "nppmp",
+	3477:  "ecomm",
+	3478:  "stun",
+	3479:  "twrpc",
+	3480:  "plethora",
+	3481:  "cleanerliverc",
+	3482:  "vulture",
+	3483:  "slim-devices",
+	3484:  "gbs-stp",
+	3485:  "celatalk",
+	3486:  "ifsf-hb-port",
+	3487:  "ltcudp",
+	3488:  "fs-rh-srv",
+	3489:  "dtp-dia",
+	3490:  "colubris",
+	3491:  "swr-port",
+	3492:  "tvdumtray-port",
+	3493:  "nut",
+	3494:  "ibm3494",
+	3495:  "seclayer-tcp",
+	3496:  "seclayer-tls",
+	3497:  "ipether232port",
+	3498:  "dashpas-port",
+	3499:  "sccip-media",
+	3500:  "rtmp-port",
+	3501:  "isoft-p2p",
+	3502:  "avinstalldisc",
+	3503:  "lsp-ping",
+	3504:  "ironstorm",
+	3505:  "ccmcomm",
+	3506:  "apc-3506",
+	3507:  "nesh-broker",
+	3508:  "interactionweb",
+	3509:  "vt-ssl",
+	3510:  "xss-port",
+	3511:  "webmail-2",
+	3512:  "aztec",
+	3513:  "arcpd",
+	3514:  "must-p2p",
+	3515:  "must-backplane",
+	3516:  "smartcard-port",
+	3517:  "802-11-iapp",
+	3518:  "artifact-msg",
+	3519:  "galileo",
+	3520:  "galileolog",
+	3521:  "mc3ss",
+	3522:  "nssocketport",
+	3523:  "odeumservlink",
+	3524:  "ecmport",
+	3525:  "eisport",
+	3526:  "starquiz-port",
+	3527:  "beserver-msg-q",
+	3528:  "jboss-iiop",
+	3529:  "jboss-iiop-ssl",
+	3530:  "gf",
+	3531:  "joltid",
+	3532:  "raven-rmp",
+	3533:  "raven-rdp",
+	3534:  "urld-port",
+	3535:  "ms-la",
+	3536:  "snac",
+	3537:  "ni-visa-remote",
+	3538:  "ibm-diradm",
+	3539:  "ibm-diradm-ssl",
+	3540:  "pnrp-port",
+	3541:  "voispeed-port",
+	3542:  "hacl-monitor",
+	3543:  "qftest-lookup",
+	3544:  "teredo",
+	3545:  "camac",
+	3547:  "symantec-sim",
+	3548:  "interworld",
+	3549:  "tellumat-nms",
+	3550:  "ssmpp",
+	3551:  "apcupsd",
+	3552:  "taserver",
+	3553:  "rbr-discovery",
+	3554:  "questnotify",
+	3555:  "razor",
+	3556:  "sky-transport",
+	3557:  "personalos-001",
+	3558:  "mcp-port",
+	3559:  "cctv-port",
+	3560:  "iniserve-port",
+	3561:  "bmc-onekey",
+	3562:  "sdbproxy",
+	3563:  "watcomdebug",
+	3564:  "esimport",
+	3567:  "dof-eps",
+	3568:  "dof-tunnel-sec",
+	3569:  "mbg-ctrl",
+	3570:  "mccwebsvr-port",
+	3571:  "megardsvr-port",
+	3572:  "megaregsvrport",
+	3573:  "tag-ups-1",
+	3574:  "dmaf-caster",
+	3575:  "ccm-port",
+	3576:  "cmc-port",
+	3577:  "config-port",
+	3578:  "data-port",
+	3579:  "ttat3lb",
+	3580:  "nati-svrloc",
+	3581:  "kfxaclicensing",
+	3582:  "press",
+	3583:  "canex-watch",
+	3584:  "u-dbap",
+	3585:  "emprise-lls",
+	3586:  "emprise-lsc",
+	3587:  "p2pgroup",
+	3588:  "sentinel",
+	3589:  "isomair",
+	3590:  "wv-csp-sms",
+	3591:  "gtrack-server",
+	3592:  "gtrack-ne",
+	3593:  "bpmd",
+	3594:  "mediaspace",
+	3595:  "shareapp",
+	3596:  "iw-mmogame",
+	3597:  "a14",
+	3598:  "a15",
+	3599:  "quasar-server",
+	3600:  "trap-daemon",
+	3601:  "visinet-gui",
+	3602:  "infiniswitchcl",
+	3603:  "int-rcv-cntrl",
+	3604:  "bmc-jmx-port",
+	3605:  "comcam-io",
+	3606:  "splitlock",
+	3607:  "precise-i3",
+	3608:  "trendchip-dcp",
+	3609:  "cpdi-pidas-cm",
+	3610:  "echonet",
+	3611:  "six-degrees",
+	3612:  "hp-dataprotect",
+	3613:  "alaris-disc",
+	3614:  "sigma-port",
+	3615:  "start-network",
+	3616:  "cd3o-protocol",
+	3617:  "sharp-server",
+	3618:  "aairnet-1",
+	3619:  "aairnet-2",
+	3620:  "ep-pcp",
+	3621:  "ep-nsp",
+	3622:  "ff-lr-port",
+	3623:  "haipe-discover",
+	3624:  "dist-upgrade",
+	3625:  "volley",
+	3626:  "bvcdaemon-port",
+	3627:  "jamserverport",
+	3628:  "ept-machine",
+	3629:  "escvpnet",
+	3630:  "cs-remote-db",
+	3631:  "cs-services",
+	3632:  "distcc",
+	3633:  "wacp",
+	3634:  "hlibmgr",
+	3635:  "sdo",
+	3636:  "servistaitsm",
+	3637:  "scservp",
+	3638:  "ehp-backup",
+	3639:  "xap-ha",
+	3640:  "netplay-port1",
+	3641:  "netplay-port2",
+	3642:  "juxml-port",
+	3643:  "audiojuggler",
+	3644:  "ssowatch",
+	3645:  "cyc",
+	3646:  "xss-srv-port",
+	3647:  "splitlock-gw",
+	3648:  "fjcp",
+	3649:  "nmmp",
+	3650:  "prismiq-plugin",
+	3651:  "xrpc-registry",
+	3652:  "vxcrnbuport",
+	3653:  "tsp",
+	3654:  "vaprtm",
+	3655:  "abatemgr",
+	3656:  "abatjss",
+	3657:  "immedianet-bcn",
+	3658:  "ps-ams",
+	3659:  "apple-sasl",
+	3660:  "can-nds-ssl",
+	3661:  "can-ferret-ssl",
+	3662:  "pserver",
+	3663:  "dtp",
+	3664:  "ups-engine",
+	3665:  "ent-engine",
+	3666:  "eserver-pap",
+	3667:  "infoexch",
+	3668:  "dell-rm-port",
+	3669:  "casanswmgmt",
+	3670:  "smile",
+	3671:  "efcp",
+	3672:  "lispworks-orb",
+	3673:  "mediavault-gui",
+	3674:  "wininstall-ipc",
+	3675:  "calltrax",
+	3676:  "va-pacbase",
+	3677:  "roverlog",
+	3678:  "ipr-dglt",
+	3679:  "Escale (Newton Dock)",
+	3680:  "npds-tracker",
+	3681:  "bts-x73",
+	3682:  "cas-mapi",
+	3683:  "bmc-ea",
+	3684:  "faxstfx-port",
+	3685:  "dsx-agent",
+	3686:  "tnmpv2",
+	3687:  "simple-push",
+	3688:  "simple-push-s",
+	3689:  "daap",
+	3690:  "svn",
+	3691:  "magaya-network",
+	3692:  "intelsync",
+	3695:  "bmc-data-coll",
+	3696:  "telnetcpcd",
+	3697:  "nw-license",
+	3698:  "sagectlpanel",
+	3699:  "kpn-icw",
+	3700:  "lrs-paging",
+	3701:  "netcelera",
+	3702:  "ws-discovery",
+	3703:  "adobeserver-3",
+	3704:  "adobeserver-4",
+	3705:  "adobeserver-5",
+	3706:  "rt-event",
+	3707:  "rt-event-s",
+	3708:  "sun-as-iiops",
+	3709:  "ca-idms",
+	3710:  "portgate-auth",
+	3711:  "edb-server2",
+	3712:  "sentinel-ent",
+	3713:  "tftps",
+	3714:  "delos-dms",
+	3715:  "anoto-rendezv",
+	3716:  "wv-csp-sms-cir",
+	3717:  "wv-csp-udp-cir",
+	3718:  "opus-services",
+	3719:  "itelserverport",
+	3720:  "ufastro-instr",
+	3721:  "xsync",
+	3722:  "xserveraid",
+	3723:  "sychrond",
+	3724:  "blizwow",
+	3725:  "na-er-tip",
+	3726:  "array-manager",
+	3727:  "e-mdu",
+	3728:  "e-woa",
+	3729:  "fksp-audit",
+	3730:  "client-ctrl",
+	3731:  "smap",
+	3732:  "m-wnn",
+	3733:  "multip-msg",
+	3734:  "synel-data",
+	3735:  "pwdis",
+	3736:  "rs-rmi",
+	3738:  "versatalk",
+	3739:  "launchbird-lm",
+	3740:  "heartbeat",
+	3741:  "wysdma",
+	3742:  "cst-port",
+	3743:  "ipcs-command",
+	3744:  "sasg",
+	3745:  "gw-call-port",
+	3746:  "linktest",
+	3747:  "linktest-s",
+	3748:  "webdata",
+	3749:  "cimtrak",
+	3750:  "cbos-ip-port",
+	3751:  "gprs-cube",
+	3752:  "vipremoteagent",
+	3753:  "nattyserver",
+	3754:  "timestenbroker",
+	3755:  "sas-remote-hlp",
+	3756:  "canon-capt",
+	3757:  "grf-port",
+	3758:  "apw-registry",
+	3759:  "exapt-lmgr",
+	3760:  "adtempusclient",
+	3761:  "gsakmp",
+	3762:  "gbs-smp",
+	3763:  "xo-wave",
+	3764:  "mni-prot-rout",
+	3765:  "rtraceroute",
+	3767:  "listmgr-port",
+	3768:  "rblcheckd",
+	3769:  "haipe-otnk",
+	3770:  "cindycollab",
+	3771:  "paging-port",
+	3772:  "ctp",
+	3773:  "ctdhercules",
+	3774:  "zicom",
+	3775:  "ispmmgr",
+	3776:  "dvcprov-port",
+	3777:  "jibe-eb",
+	3778:  "c-h-it-port",
+	3779:  "cognima",
+	3780:  "nnp",
+	3781:  "abcvoice-port",
+	3782:  "iso-tp0s",
+	3783:  "bim-pem",
+	3784:  "bfd-control",
+	3785:  "bfd-echo",
+	3786:  "upstriggervsw",
+	3787:  "fintrx",
+	3788:  "isrp-port",
+	3789:  "remotedeploy",
+	3790:  "quickbooksrds",
+	3791:  "tvnetworkvideo",
+	3792:  "sitewatch",
+	3793:  "dcsoftware",
+	3794:  "jaus",
+	3795:  "myblast",
+	3796:  "spw-dialer",
+	3797:  "idps",
+	3798:  "minilock",
+	3799:  "radius-dynauth",
+	3800:  "pwgpsi",
+	3801:  "ibm-mgr",
+	3802:  "vhd",
+	3803:  "soniqsync",
+	3804:  "iqnet-port",
+	3805:  "tcpdataserver",
+	3806:  "wsmlb",
+	3807:  "spugna",
+	3808:  "sun-as-iiops-ca",
+	3809:  "apocd",
+	3810:  "wlanauth",
+	3811:  "amp",
+	3812:  "neto-wol-server",
+	3813:  "rap-ip",
+	3814:  "neto-dcs",
+	3815:  "lansurveyorxml",
+	3816:  "sunlps-http",
+	3817:  "tapeware",
+	3818:  "crinis-hb",
+	3819:  "epl-slp",
+	3820:  "scp",
+	3821:  "pmcp",
+	3822:  "acp-discovery",
+	3823:  "acp-conduit",
+	3824:  "acp-policy",
+	3825:  "ffserver",
+	3826:  "warmux",
+	3827:  "netmpi",
+	3828:  "neteh",
+	3829:  "neteh-ext",
+	3830:  "cernsysmgmtagt",
+	3831:  "dvapps",
+	3832:  "xxnetserver",
+	3833:  "aipn-auth",
+	3834:  "spectardata",
+	3835:  "spectardb",
+	3836:  "markem-dcp",
+	3837:  "mkm-discovery",
+	3838:  "sos",
+	3839:  "amx-rms",
+	3840:  "flirtmitmir",
+	3842:  "nhci",
+	3843:  "quest-agent",
+	3844:  "rnm",
+	3845:  "v-one-spp",
+	3846:  "an-pcp",
+	3847:  "msfw-control",
+	3848:  "item",
+	3849:  "spw-dnspreload",
+	3850:  "qtms-bootstrap",
+	3851:  "spectraport",
+	3852:  "sse-app-config",
+	3853:  "sscan",
+	3854:  "stryker-com",
+	3855:  "opentrac",
+	3856:  "informer",
+	3857:  "trap-port",
+	3858:  "trap-port-mom",
+	3859:  "nav-port",
+	3860:  "sasp",
+	3861:  "winshadow-hd",
+	3862:  "giga-pocket",
+	3863:  "asap-udp",
+	3865:  "xpl",
+	3866:  "dzdaemon",
+	3867:  "dzoglserver",
+	3869:  "ovsam-mgmt",
+	3870:  "ovsam-d-agent",
+	3871:  "avocent-adsap",
+	3872:  "oem-agent",
+	3873:  "fagordnc",
+	3874:  "sixxsconfig",
+	3875:  "pnbscada",
+	3876:  "dl-agent",
+	3877:  "xmpcr-interface",
+	3878:  "fotogcad",
+	3879:  "appss-lm",
+	3880:  "igrs",
+	3881:  "idac",
+	3882:  "msdts1",
+	3883:  "vrpn",
+	3884:  "softrack-meter",
+	3885:  "topflow-ssl",
+	3886:  "nei-management",
+	3887:  "ciphire-data",
+	3888:  "ciphire-serv",
+	3889:  "dandv-tester",
+	3890:  "ndsconnect",
+	3891:  "rtc-pm-port",
+	3892:  "pcc-image-port",
+	3893:  "cgi-starapi",
+	3894:  "syam-agent",
+	3895:  "syam-smc",
+	3896:  "sdo-tls",
+	3897:  "sdo-ssh",
+	3898:  "senip",
+	3899:  "itv-control",
+	3900:  "udt-os",
+	3901:  "nimsh",
+	3902:  "nimaux",
+	3903:  "charsetmgr",
+	3904:  "omnilink-port",
+	3905:  "mupdate",
+	3906:  "topovista-data",
+	3907:  "imoguia-port",
+	3908:  "hppronetman",
+	3909:  "surfcontrolcpa",
+	3910:  "prnrequest",
+	3911:  "prnstatus",
+	3912:  "gbmt-stars",
+	3913:  "listcrt-port",
+	3914:  "listcrt-port-2",
+	3915:  "agcat",
+	3916:  "wysdmc",
+	3917:  "aftmux",
+	3918:  "pktcablemmcops",
+	3919:  "hyperip",
+	3920:  "exasoftport1",
+	3921:  "herodotus-net",
+	3922:  "sor-update",
+	3923:  "symb-sb-port",
+	3924:  "mpl-gprs-port",
+	3925:  "zmp",
+	3926:  "winport",
+	3927:  "natdataservice",
+	3928:  "netboot-pxe",
+	3929:  "smauth-port",
+	3930:  "syam-webserver",
+	3931:  "msr-plugin-port",
+	3932:  "dyn-site",
+	3933:  "plbserve-port",
+	3934:  "sunfm-port",
+	3935:  "sdp-portmapper",
+	3936:  "mailprox",
+	3937:  "dvbservdsc",
+	3938:  "dbcontrol-agent",
+	3939:  "aamp",
+	3940:  "xecp-node",
+	3941:  "homeportal-web",
+	3942:  "srdp",
+	3943:  "tig",
+	3944:  "sops",
+	3945:  "emcads",
+	3946:  "backupedge",
+	3947:  "ccp",
+	3948:  "apdap",
+	3949:  "drip",
+	3950:  "namemunge",
+	3951:  "pwgippfax",
+	3952:  "i3-sessionmgr",
+	3953:  "xmlink-connect",
+	3954:  "adrep",
+	3955:  "p2pcommunity",
+	3956:  "gvcp",
+	3957:  "mqe-broker",
+	3958:  "mqe-agent",
+	3959:  "treehopper",
+	3960:  "bess",
+	3961:  "proaxess",
+	3962:  "sbi-agent",
+	3963:  "thrp",
+	3964:  "sasggprs",
+	3965:  "ati-ip-to-ncpe",
+	3966:  "bflckmgr",
+	3967:  "ppsms",
+	3968:  "ianywhere-dbns",
+	3969:  "landmarks",
+	3970:  "lanrevagent",
+	3971:  "lanrevserver",
+	3972:  "iconp",
+	3973:  "progistics",
+	3974:  "citysearch",
+	3975:  "airshot",
+	3976:  "opswagent",
+	3977:  "opswmanager",
+	3978:  "secure-cfg-svr",
+	3979:  "smwan",
+	3980:  "acms",
+	3981:  "starfish",
+	3982:  "eis",
+	3983:  "eisp",
+	3984:  "mapper-nodemgr",
+	3985:  "mapper-mapethd",
+	3986:  "mapper-ws-ethd",
+	3987:  "centerline",
+	3988:  "dcs-config",
+	3989:  "bv-queryengine",
+	3990:  "bv-is",
+	3991:  "bv-smcsrv",
+	3992:  "bv-ds",
+	3993:  "bv-agent",
+	3995:  "iss-mgmt-ssl",
+	3996:  "abcsoftware",
+	3997:  "agentsease-db",
+	3998:  "dnx",
+	3999:  "nvcnet",
+	4000:  "terabase",
+	4001:  "newoak",
+	4002:  "pxc-spvr-ft",
+	4003:  "pxc-splr-ft",
+	4004:  "pxc-roid",
+	4005:  "pxc-pin",
+	4006:  "pxc-spvr",
+	4007:  "pxc-splr",
+	4008:  "netcheque",
+	4009:  "chimera-hwm",
+	4010:  "samsung-unidex",
+	4011:  "altserviceboot",
+	4012:  "pda-gate",
+	4013:  "acl-manager",
+	4014:  "taiclock",
+	4015:  "talarian-mcast1",
+	4016:  "talarian-mcast2",
+	4017:  "talarian-mcast3",
+	4018:  "talarian-mcast4",
+	4019:  "talarian-mcast5",
+	4020:  "trap",
+	4021:  "nexus-portal",
+	4022:  "dnox",
+	4023:  "esnm-zoning",
+	4024:  "tnp1-port",
+	4025:  "partimage",
+	4026:  "as-debug",
+	4027:  "bxp",
+	4028:  "dtserver-port",
+	4029:  "ip-qsig",
+	4030:  "jdmn-port",
+	4031:  "suucp",
+	4032:  "vrts-auth-port",
+	4033:  "sanavigator",
+	4034:  "ubxd",
+	4035:  "wap-push-http",
+	4036:  "wap-push-https",
+	4037:  "ravehd",
+	4038:  "fazzt-ptp",
+	4039:  "fazzt-admin",
+	4040:  "yo-main",
+	4041:  "houston",
+	4042:  "ldxp",
+	4043:  "nirp",
+	4044:  "ltp",
+	4045:  "npp",
+	4046:  "acp-proto",
+	4047:  "ctp-state",
+	4049:  "wafs",
+	4050:  "cisco-wafs",
+	4051:  "cppdp",
+	4052:  "interact",
+	4053:  "ccu-comm-1",
+	4054:  "ccu-comm-2",
+	4055:  "ccu-comm-3",
+	4056:  "lms",
+	4057:  "wfm",
+	4058:  "kingfisher",
+	4059:  "dlms-cosem",
+	4060:  "dsmeter-iatc",
+	4061:  "ice-location",
+	4062:  "ice-slocation",
+	4063:  "ice-router",
+	4064:  "ice-srouter",
+	4065:  "avanti-cdp",
+	4066:  "pmas",
+	4067:  "idp",
+	4068:  "ipfltbcst",
+	4069:  "minger",
+	4070:  "tripe",
+	4071:  "aibkup",
+	4072:  "zieto-sock",
+	4073:  "iRAPP",
+	4074:  "cequint-cityid",
+	4075:  "perimlan",
+	4076:  "seraph",
+	4077:  "ascomalarm",
+	4079:  "santools",
+	4080:  "lorica-in",
+	4081:  "lorica-in-sec",
+	4082:  "lorica-out",
+	4083:  "lorica-out-sec",
+	4084:  "fortisphere-vm",
+	4086:  "ftsync",
+	4089:  "opencore",
+	4090:  "omasgport",
+	4091:  "ewinstaller",
+	4092:  "ewdgs",
+	4093:  "pvxpluscs",
+	4094:  "sysrqd",
+	4095:  "xtgui",
+	4096:  "bre",
+	4097:  "patrolview",
+	4098:  "drmsfsd",
+	4099:  "dpcp",
+	4100:  "igo-incognito",
+	4101:  "brlp-0",
+	4102:  "brlp-1",
+	4103:  "brlp-2",
+	4104:  "brlp-3",
+	4105:  "shofar",
+	4106:  "synchronite",
+	4107:  "j-ac",
+	4108:  "accel",
+	4109:  "izm",
+	4110:  "g2tag",
+	4111:  "xgrid",
+	4112:  "apple-vpns-rp",
+	4113:  "aipn-reg",
+	4114:  "jomamqmonitor",
+	4115:  "cds",
+	4116:  "smartcard-tls",
+	4117:  "hillrserv",
+	4118:  "netscript",
+	4119:  "assuria-slm",
+	4121:  "e-builder",
+	4122:  "fprams",
+	4123:  "z-wave",
+	4124:  "tigv2",
+	4125:  "opsview-envoy",
+	4126:  "ddrepl",
+	4127:  "unikeypro",
+	4128:  "nufw",
+	4129:  "nuauth",
+	4130:  "fronet",
+	4131:  "stars",
+	4132:  "nuts-dem",
+	4133:  "nuts-bootp",
+	4134:  "nifty-hmi",
+	4135:  "cl-db-attach",
+	4136:  "cl-db-request",
+	4137:  "cl-db-remote",
+	4138:  "nettest",
+	4139:  "thrtx",
+	4140:  "cedros-fds",
+	4141:  "oirtgsvc",
+	4142:  "oidocsvc",
+	4143:  "oidsr",
+	4145:  "vvr-control",
+	4146:  "tgcconnect",
+	4147:  "vrxpservman",
+	4148:  "hhb-handheld",
+	4149:  "agslb",
+	4150:  "PowerAlert-nsa",
+	4151:  "menandmice-noh",
+	4152:  "idig-mux",
+	4153:  "mbl-battd",
+	4154:  "atlinks",
+	4155:  "bzr",
+	4156:  "stat-results",
+	4157:  "stat-scanner",
+	4158:  "stat-cc",
+	4159:  "nss",
+	4160:  "jini-discovery",
+	4161:  "omscontact",
+	4162:  "omstopology",
+	4163:  "silverpeakpeer",
+	4164:  "silverpeakcomm",
+	4165:  "altcp",
+	4166:  "joost",
+	4167:  "ddgn",
+	4168:  "pslicser",
+	4169:  "iadt-disc",
+	4172:  "pcoip",
+	4173:  "mma-discovery",
+	4174:  "sm-disc",
+	4177:  "wello",
+	4178:  "storman",
+	4179:  "MaxumSP",
+	4180:  "httpx",
+	4181:  "macbak",
+	4182:  "pcptcpservice",
+	4183:  "cyborgnet",
+	4184:  "universe-suite",
+	4185:  "wcpp",
+	4188:  "vatata",
+	4191:  "dsmipv6",
+	4192:  "azeti-bd",
+	4197:  "hctl",
+	4199:  "eims-admin",
+	4300:  "corelccam",
+	4301:  "d-data",
+	4302:  "d-data-control",
+	4303:  "srcp",
+	4304:  "owserver",
+	4305:  "batman",
+	4306:  "pinghgl",
+	4307:  "trueconf",
+	4308:  "compx-lockview",
+	4309:  "dserver",
+	4310:  "mirrtex",
+	4320:  "fdt-rcatp",
+	4321:  "rwhois",
+	4322:  "trim-event",
+	4323:  "trim-ice",
+	4325:  "geognosisman",
+	4326:  "geognosis",
+	4327:  "jaxer-web",
+	4328:  "jaxer-manager",
+	4333:  "ahsp",
+	4340:  "gaia",
+	4341:  "lisp-data",
+	4342:  "lisp-control",
+	4343:  "unicall",
+	4344:  "vinainstall",
+	4345:  "m4-network-as",
+	4346:  "elanlm",
+	4347:  "lansurveyor",
+	4348:  "itose",
+	4349:  "fsportmap",
+	4350:  "net-device",
+	4351:  "plcy-net-svcs",
+	4352:  "pjlink",
+	4353:  "f5-iquery",
+	4354:  "qsnet-trans",
+	4355:  "qsnet-workst",
+	4356:  "qsnet-assist",
+	4357:  "qsnet-cond",
+	4358:  "qsnet-nucl",
+	4359:  "omabcastltkm",
+	4361:  "nacnl",
+	4362:  "afore-vdp-disc",
+	4366:  "shadowstream",
+	4368:  "wxbrief",
+	4369:  "epmd",
+	4370:  "elpro-tunnel",
+	4371:  "l2c-disc",
+	4372:  "l2c-data",
+	4373:  "remctl",
+	4375:  "tolteces",
+	4376:  "bip",
+	4377:  "cp-spxsvr",
+	4378:  "cp-spxdpy",
+	4379:  "ctdb",
+	4389:  "xandros-cms",
+	4390:  "wiegand",
+	4394:  "apwi-disc",
+	4395:  "omnivisionesx",
+	4400:  "ds-srv",
+	4401:  "ds-srvr",
+	4402:  "ds-clnt",
+	4403:  "ds-user",
+	4404:  "ds-admin",
+	4405:  "ds-mail",
+	4406:  "ds-slp",
+	4412:  "smallchat",
+	4413:  "avi-nms-disc",
+	4416:  "pjj-player-disc",
+	4418:  "axysbridge",
+	4420:  "nvm-express",
+	4425:  "netrockey6",
+	4426:  "beacon-port-2",
+	4430:  "rsqlserver",
+	4432:  "l-acoustics",
+	4441:  "netblox",
+	4442:  "saris",
+	4443:  "pharos",
+	4444:  "krb524",
+	4445:  "upnotifyp",
+	4446:  "n1-fwp",
+	4447:  "n1-rmgmt",
+	4448:  "asc-slmd",
+	4449:  "privatewire",
+	4450:  "camp",
+	4451:  "ctisystemmsg",
+	4452:  "ctiprogramload",
+	4453:  "nssalertmgr",
+	4454:  "nssagentmgr",
+	4455:  "prchat-user",
+	4456:  "prchat-server",
+	4457:  "prRegister",
+	4458:  "mcp",
+	4484:  "hpssmgmt",
+	4486:  "icms",
+	4488:  "awacs-ice",
+	4500:  "ipsec-nat-t",
+	4534:  "armagetronad",
+	4535:  "ehs",
+	4536:  "ehs-ssl",
+	4537:  "wssauthsvc",
+	4538:  "swx-gate",
+	4545:  "worldscores",
+	4546:  "sf-lm",
+	4547:  "lanner-lm",
+	4548:  "synchromesh",
+	4549:  "aegate",
+	4550:  "gds-adppiw-db",
+	4551:  "ieee-mih",
+	4552:  "menandmice-mon",
+	4554:  "msfrs",
+	4555:  "rsip",
+	4556:  "dtn-bundle",
+	4557:  "mtcevrunqss",
+	4558:  "mtcevrunqman",
+	4559:  "hylafax",
+	4566:  "kwtc",
+	4567:  "tram",
+	4568:  "bmc-reporting",
+	4569:  "iax",
+	4591:  "l3t-at-an",
+	4592:  "hrpd-ith-at-an",
+	4593:  "ipt-anri-anri",
+	4594:  "ias-session",
+	4595:  "ias-paging",
+	4596:  "ias-neighbor",
+	4597:  "a21-an-1xbs",
+	4598:  "a16-an-an",
+	4599:  "a17-an-an",
+	4600:  "piranha1",
+	4601:  "piranha2",
+	4621:  "ventoso",
+	4658:  "playsta2-app",
+	4659:  "playsta2-lob",
+	4660:  "smaclmgr",
+	4661:  "kar2ouche",
+	4662:  "oms",
+	4663:  "noteit",
+	4664:  "ems",
+	4665:  "contclientms",
+	4666:  "eportcomm",
+	4667:  "mmacomm",
+	4668:  "mmaeds",
+	4669:  "eportcommdata",
+	4670:  "light",
+	4671:  "acter",
+	4672:  "rfa",
+	4673:  "cxws",
+	4674:  "appiq-mgmt",
+	4675:  "dhct-status",
+	4676:  "dhct-alerts",
+	4677:  "bcs",
+	4678:  "traversal",
+	4679:  "mgesupervision",
+	4680:  "mgemanagement",
+	4681:  "parliant",
+	4682:  "finisar",
+	4683:  "spike",
+	4684:  "rfid-rp1",
+	4685:  "autopac",
+	4686:  "msp-os",
+	4687:  "nst",
+	4688:  "mobile-p2p",
+	4689:  "altovacentral",
+	4690:  "prelude",
+	4691:  "mtn",
+	4692:  "conspiracy",
+	4700:  "netxms-agent",
+	4701:  "netxms-mgmt",
+	4702:  "netxms-sync",
+	4711:  "trinity-dist",
+	4725:  "truckstar",
+	4726:  "a26-fap-fgw",
+	4727:  "fcis-disc",
+	4728:  "capmux",
+	4729:  "gsmtap",
+	4730:  "gearman",
+	4732:  "ohmtrigger",
+	4737:  "ipdr-sp",
+	4738:  "solera-lpn",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	4741:  "lumimgrd",
+	4742:  "sicct-sdp",
+	4743:  "openhpid",
+	4744:  "ifsp",
+	4745:  "fmp",
+	4746:  "intelliadm-disc",
+	4747:  "buschtrommel",
+	4749:  "profilemac",
+	4750:  "ssad",
+	4751:  "spocp",
+	4752:  "snap",
+	4753:  "simon-disc",
+	4754:  "gre-in-udp",
+	4755:  "gre-udp-dtls",
+	4784:  "bfd-multi-ctl",
+	4785:  "cncp",
+	4789:  "vxlan",
+	4790:  "vxlan-gpe",
+	4791:  "roce",
+	4800:  "iims",
+	4801:  "iwec",
+	4802:  "ilss",
+	4803:  "notateit-disc",
+	4804:  "aja-ntv4-disc",
+	4827:  "htcp",
+	4837:  "varadero-0",
+	4838:  "varadero-1",
+	4839:  "varadero-2",
+	4840:  "opcua-udp",
+	4841:  "quosa",
+	4842:  "gw-asv",
+	4843:  "opcua-tls",
+	4844:  "gw-log",
+	4845:  "wcr-remlib",
+	4846:  "contamac-icm",
+	4847:  "wfc",
+	4848:  "appserv-http",
+	4849:  "appserv-https",
+	4850:  "sun-as-nodeagt",
+	4851:  "derby-repli",
+	4867:  "unify-debug",
+	4868:  "phrelay",
+	4869:  "phrelaydbg",
+	4870:  "cc-tracking",
+	4871:  "wired",
+	4876:  "tritium-can",
+	4877:  "lmcs",
+	4878:  "inst-discovery",
+	4881:  "socp-t",
+	4882:  "socp-c",
+	4884:  "hivestor",
+	4885:  "abbs",
+	4894:  "lyskom",
+	4899:  "radmin-port",
+	4900:  "hfcs",
+	4914:  "bones",
+	4936:  "an-signaling",
+	4937:  "atsc-mh-ssc",
+	4940:  "eq-office-4940",
+	4941:  "eq-office-4941",
+	4942:  "eq-office-4942",
+	4949:  "munin",
+	4950:  "sybasesrvmon",
+	4951:  "pwgwims",
+	4952:  "sagxtsds",
+	4969:  "ccss-qmm",
+	4970:  "ccss-qsm",
+	4980:  "ctxs-vpp",
+	4986:  "mrip",
+	4987:  "smar-se-port1",
+	4988:  "smar-se-port2",
+	4989:  "parallel",
+	4990:  "busycal",
+	4991:  "vrt",
+	4999:  "hfcs-manager",
+	5000:  "commplex-main",
+	5001:  "commplex-link",
+	5002:  "rfe",
+	5003:  "fmpro-internal",
+	5004:  "avt-profile-1",
+	5005:  "avt-profile-2",
+	5006:  "wsm-server",
+	5007:  "wsm-server-ssl",
+	5008:  "synapsis-edge",
+	5009:  "winfs",
+	5010:  "telelpathstart",
+	5011:  "telelpathattack",
+	5012:  "nsp",
+	5013:  "fmpro-v6",
+	5014:  "onpsocket",
+	5020:  "zenginkyo-1",
+	5021:  "zenginkyo-2",
+	5022:  "mice",
+	5023:  "htuilsrv",
+	5024:  "scpi-telnet",
+	5025:  "scpi-raw",
+	5026:  "strexec-d",
+	5027:  "strexec-s",
+	5029:  "infobright",
+	5030:  "surfpass",
+	5031:  "dmp",
+	5042:  "asnaacceler8db",
+	5043:  "swxadmin",
+	5044:  "lxi-evntsvc",
+	5046:  "vpm-udp",
+	5047:  "iscape",
+	5049:  "ivocalize",
+	5050:  "mmcc",
+	5051:  "ita-agent",
+	5052:  "ita-manager",
+	5053:  "rlm-disc",
+	5055:  "unot",
+	5056:  "intecom-ps1",
+	5057:  "intecom-ps2",
+	5058:  "locus-disc",
+	5059:  "sds",
+	5060:  "sip",
+	5061:  "sips",
+	5062:  "na-localise",
+	5064:  "ca-1",
+	5065:  "ca-2",
+	5066:  "stanag-5066",
+	5067:  "authentx",
+	5069:  "i-net-2000-npr",
+	5070:  "vtsas",
+	5071:  "powerschool",
+	5072:  "ayiya",
+	5073:  "tag-pm",
+	5074:  "alesquery",
+	5078:  "pixelpusher",
+	5079:  "cp-spxrpts",
+	5080:  "onscreen",
+	5081:  "sdl-ets",
+	5082:  "qcp",
+	5083:  "qfp",
+	5084:  "llrp",
+	5085:  "encrypted-llrp",
+	5092:  "magpie",
+	5093:  "sentinel-lm",
+	5094:  "hart-ip",
+	5099:  "sentlm-srv2srv",
+	5100:  "socalia",
+	5101:  "talarian-udp",
+	5102:  "oms-nonsecure",
+	5104:  "tinymessage",
+	5105:  "hughes-ap",
+	5111:  "taep-as-svc",
+	5112:  "pm-cmdsvr",
+	5116:  "emb-proj-cmd",
+	5120:  "barracuda-bbs",
+	5133:  "nbt-pc",
+	5136:  "minotaur-sa",
+	5137:  "ctsd",
+	5145:  "rmonitor-secure",
+	5150:  "atmp",
+	5151:  "esri-sde",
+	5152:  "sde-discovery",
+	5154:  "bzflag",
+	5155:  "asctrl-agent",
+	5164:  "vpa-disc",
+	5165:  "ife-icorp",
+	5166:  "winpcs",
+	5167:  "scte104",
+	5168:  "scte30",
+	5190:  "aol",
+	5191:  "aol-1",
+	5192:  "aol-2",
+	5193:  "aol-3",
+	5200:  "targus-getdata",
+	5201:  "targus-getdata1",
+	5202:  "targus-getdata2",
+	5203:  "targus-getdata3",
+	5223:  "hpvirtgrp",
+	5224:  "hpvirtctrl",
+	5225:  "hp-server",
+	5226:  "hp-status",
+	5227:  "perfd",
+	5234:  "eenet",
+	5235:  "galaxy-network",
+	5236:  "padl2sim",
+	5237:  "mnet-discovery",
+	5245:  "downtools-disc",
+	5246:  "capwap-control",
+	5247:  "capwap-data",
+	5248:  "caacws",
+	5249:  "caaclang2",
+	5250:  "soagateway",
+	5251:  "caevms",
+	5252:  "movaz-ssc",
+	5264:  "3com-njack-1",
+	5265:  "3com-njack-2",
+	5270:  "cartographerxmp",
+	5271:  "cuelink-disc",
+	5272:  "pk",
+	5282:  "transmit-port",
+	5298:  "presence",
+	5299:  "nlg-data",
+	5300:  "hacl-hb",
+	5301:  "hacl-gs",
+	5302:  "hacl-cfg",
+	5303:  "hacl-probe",
+	5304:  "hacl-local",
+	5305:  "hacl-test",
+	5306:  "sun-mc-grp",
+	5307:  "sco-aip",
+	5308:  "cfengine",
+	5309:  "jprinter",
+	5310:  "outlaws",
+	5312:  "permabit-cs",
+	5313:  "rrdp",
+	5314:  "opalis-rbt-ipc",
+	5315:  "hacl-poll",
+	5343:  "kfserver",
+	5344:  "xkotodrcp",
+	5349:  "stuns",
+	5350:  "pcp-multicast",
+	5351:  "pcp",
+	5352:  "dns-llq",
+	5353:  "mdns",
+	5354:  "mdnsresponder",
+	5355:  "llmnr",
+	5356:  "ms-smlbiz",
+	5357:  "wsdapi",
+	5358:  "wsdapi-s",
+	5359:  "ms-alerter",
+	5360:  "ms-sideshow",
+	5361:  "ms-s-sideshow",
+	5362:  "serverwsd2",
+	5363:  "net-projection",
+	5364:  "kdnet",
+	5397:  "stresstester",
+	5398:  "elektron-admin",
+	5399:  "securitychase",
+	5400:  "excerpt",
+	5401:  "excerpts",
+	5402:  "mftp",
+	5403:  "hpoms-ci-lstn",
+	5404:  "hpoms-dps-lstn",
+	5405:  "netsupport",
+	5406:  "systemics-sox",
+	5407:  "foresyte-clear",
+	5408:  "foresyte-sec",
+	5409:  "salient-dtasrv",
+	5410:  "salient-usrmgr",
+	5411:  "actnet",
+	5412:  "continuus",
+	5413:  "wwiotalk",
+	5414:  "statusd",
+	5415:  "ns-server",
+	5416:  "sns-gateway",
+	5417:  "sns-agent",
+	5418:  "mcntp",
+	5419:  "dj-ice",
+	5420:  "cylink-c",
+	5421:  "netsupport2",
+	5422:  "salient-mux",
+	5423:  "virtualuser",
+	5424:  "beyond-remote",
+	5425:  "br-channel",
+	5426:  "devbasic",
+	5427:  "sco-peer-tta",
+	5428:  "telaconsole",
+	5429:  "base",
+	5430:  "radec-corp",
+	5431:  "park-agent",
+	5432:  "postgresql",
+	5433:  "pyrrho",
+	5434:  "sgi-arrayd",
+	5435:  "sceanics",
+	5436:  "pmip6-cntl",
+	5437:  "pmip6-data",
+	5443:  "spss",
+	5450:  "tiepie-disc",
+	5453:  "surebox",
+	5454:  "apc-5454",
+	5455:  "apc-5455",
+	5456:  "apc-5456",
+	5461:  "silkmeter",
+	5462:  "ttl-publisher",
+	5463:  "ttlpriceproxy",
+	5464:  "quailnet",
+	5465:  "netops-broker",
+	5474:  "apsolab-rpc",
+	5500:  "fcp-addr-srvr1",
+	5501:  "fcp-addr-srvr2",
+	5502:  "fcp-srvr-inst1",
+	5503:  "fcp-srvr-inst2",
+	5504:  "fcp-cics-gw1",
+	5505:  "checkoutdb",
+	5506:  "amc",
+	5553:  "sgi-eventmond",
+	5554:  "sgi-esphttp",
+	5555:  "personal-agent",
+	5556:  "freeciv",
+	5567:  "dof-dps-mc-sec",
+	5568:  "sdt",
+	5569:  "rdmnet-device",
+	5573:  "sdmmp",
+	5580:  "tmosms0",
+	5581:  "tmosms1",
+	5582:  "fac-restore",
+	5583:  "tmo-icon-sync",
+	5584:  "bis-web",
+	5585:  "bis-sync",
+	5597:  "ininmessaging",
+	5598:  "mctfeed",
+	5599:  "esinstall",
+	5600:  "esmmanager",
+	5601:  "esmagent",
+	5602:  "a1-msc",
+	5603:  "a1-bs",
+	5604:  "a3-sdunode",
+	5605:  "a4-sdunode",
+	5627:  "ninaf",
+	5628:  "htrust",
+	5629:  "symantec-sfdb",
+	5630:  "precise-comm",
+	5631:  "pcanywheredata",
+	5632:  "pcanywherestat",
+	5633:  "beorl",
+	5634:  "xprtld",
+	5670:  "zre-disc",
+	5671:  "amqps",
+	5672:  "amqp",
+	5673:  "jms",
+	5674:  "hyperscsi-port",
+	5675:  "v5ua",
+	5676:  "raadmin",
+	5677:  "questdb2-lnchr",
+	5678:  "rrac",
+	5679:  "dccm",
+	5680:  "auriga-router",
+	5681:  "ncxcp",
+	5682:  "brightcore",
+	5683:  "coap",
+	5684:  "coaps",
+	5687:  "gog-multiplayer",
+	5688:  "ggz",
+	5689:  "qmvideo",
+	5713:  "proshareaudio",
+	5714:  "prosharevideo",
+	5715:  "prosharedata",
+	5716:  "prosharerequest",
+	5717:  "prosharenotify",
+	5718:  "dpm",
+	5719:  "dpm-agent",
+	5720:  "ms-licensing",
+	5721:  "dtpt",
+	5722:  "msdfsr",
+	5723:  "omhs",
+	5724:  "omsdk",
+	5728:  "io-dist-group",
+	5729:  "openmail",
+	5730:  "unieng",
+	5741:  "ida-discover1",
+	5742:  "ida-discover2",
+	5743:  "watchdoc-pod",
+	5744:  "watchdoc",
+	5745:  "fcopy-server",
+	5746:  "fcopys-server",
+	5747:  "tunatic",
+	5748:  "tunalyzer",
+	5750:  "rscd",
+	5755:  "openmailg",
+	5757:  "x500ms",
+	5766:  "openmailns",
+	5767:  "s-openmail",
+	5768:  "openmailpxy",
+	5769:  "spramsca",
+	5770:  "spramsd",
+	5771:  "netagent",
+	5777:  "dali-port",
+	5781:  "3par-evts",
+	5782:  "3par-mgmt",
+	5783:  "3par-mgmt-ssl",
+	5784:  "ibar",
+	5785:  "3par-rcopy",
+	5786:  "cisco-redu",
+	5787:  "waascluster",
+	5793:  "xtreamx",
+	5794:  "spdp",
+	5813:  "icmpd",
+	5814:  "spt-automation",
+	5859:  "wherehoo",
+	5863:  "ppsuitemsg",
+	5900:  "rfb",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	5963:  "indy",
+	5968:  "mppolicy-v5",
+	5969:  "mppolicy-mgr",
+	5984:  "couchdb",
+	5985:  "wsman",
+	5986:  "wsmans",
+	5987:  "wbem-rmi",
+	5988:  "wbem-http",
+	5989:  "wbem-https",
+	5990:  "wbem-exp-https",
+	5991:  "nuxsl",
+	5992:  "consul-insight",
+	5999:  "cvsup",
+	6064:  "ndl-ahp-svc",
+	6065:  "winpharaoh",
+	6066:  "ewctsp",
+	6069:  "trip",
+	6070:  "messageasap",
+	6071:  "ssdtp",
+	6072:  "diagnose-proc",
+	6073:  "directplay8",
+	6074:  "max",
+	6080:  "gue",
+	6081:  "geneve",
+	6082:  "p25cai",
+	6083:  "miami-bcast",
+	6085:  "konspire2b",
+	6086:  "pdtp",
+	6087:  "ldss",
+	6088:  "doglms-notify",
+	6100:  "synchronet-db",
+	6101:  "synchronet-rtc",
+	6102:  "synchronet-upd",
+	6103:  "rets",
+	6104:  "dbdb",
+	6105:  "primaserver",
+	6106:  "mpsserver",
+	6107:  "etc-control",
+	6108:  "sercomm-scadmin",
+	6109:  "globecast-id",
+	6110:  "softcm",
+	6111:  "spc",
+	6112:  "dtspcd",
+	6118:  "tipc",
+	6122:  "bex-webadmin",
+	6123:  "backup-express",
+	6124:  "pnbs",
+	6133:  "nbt-wol",
+	6140:  "pulsonixnls",
+	6141:  "meta-corp",
+	6142:  "aspentec-lm",
+	6143:  "watershed-lm",
+	6144:  "statsci1-lm",
+	6145:  "statsci2-lm",
+	6146:  "lonewolf-lm",
+	6147:  "montage-lm",
+	6148:  "ricardo-lm",
+	6149:  "tal-pod",
+	6160:  "ecmp-data",
+	6161:  "patrol-ism",
+	6162:  "patrol-coll",
+	6163:  "pscribe",
+	6200:  "lm-x",
+	6201:  "thermo-calc",
+	6209:  "qmtps",
+	6222:  "radmind",
+	6241:  "jeol-nsddp-1",
+	6242:  "jeol-nsddp-2",
+	6243:  "jeol-nsddp-3",
+	6244:  "jeol-nsddp-4",
+	6251:  "tl1-raw-ssl",
+	6252:  "tl1-ssh",
+	6253:  "crip",
+	6268:  "grid",
+	6269:  "grid-alt",
+	6300:  "bmc-grx",
+	6301:  "bmc-ctd-ldap",
+	6306:  "ufmp",
+	6315:  "scup-disc",
+	6316:  "abb-escp",
+	6317:  "nav-data",
+	6320:  "repsvc",
+	6321:  "emp-server1",
+	6322:  "emp-server2",
+	6324:  "hrd-ns-disc",
+	6343:  "sflow",
+	6346:  "gnutella-svc",
+	6347:  "gnutella-rtr",
+	6350:  "adap",
+	6355:  "pmcs",
+	6360:  "metaedit-mu",
+	6363:  "ndn",
+	6370:  "metaedit-se",
+	6382:  "metatude-mds",
+	6389:  "clariion-evr01",
+	6390:  "metaedit-ws",
+	6417:  "faxcomservice",
+	6419:  "svdrp-disc",
+	6420:  "nim-vdrshell",
+	6421:  "nim-wan",
+	6443:  "sun-sr-https",
+	6444:  "sge-qmaster",
+	6445:  "sge-execd",
+	6446:  "mysql-proxy",
+	6455:  "skip-cert-recv",
+	6456:  "skip-cert-send",
+	6464:  "ieee11073-20701",
+	6471:  "lvision-lm",
+	6480:  "sun-sr-http",
+	6481:  "servicetags",
+	6482:  "ldoms-mgmt",
+	6483:  "SunVTS-RMI",
+	6484:  "sun-sr-jms",
+	6485:  "sun-sr-iiop",
+	6486:  "sun-sr-iiops",
+	6487:  "sun-sr-iiop-aut",
+	6488:  "sun-sr-jmx",
+	6489:  "sun-sr-admin",
+	6500:  "boks",
+	6501:  "boks-servc",
+	6502:  "boks-servm",
+	6503:  "boks-clntd",
+	6505:  "badm-priv",
+	6506:  "badm-pub",
+	6507:  "bdir-priv",
+	6508:  "bdir-pub",
+	6509:  "mgcs-mfp-port",
+	6510:  "mcer-port",
+	6511:  "dccp-udp",
+	6514:  "syslog-tls",
+	6515:  "elipse-rec",
+	6543:  "lds-distrib",
+	6544:  "lds-dump",
+	6547:  "apc-6547",
+	6548:  "apc-6548",
+	6549:  "apc-6549",
+	6550:  "fg-sysupdate",
+	6551:  "sum",
+	6558:  "xdsxdm",
+	6566:  "sane-port",
+	6568:  "rp-reputation",
+	6579:  "affiliate",
+	6580:  "parsec-master",
+	6581:  "parsec-peer",
+	6582:  "parsec-game",
+	6583:  "joaJewelSuite",
+	6619:  "odette-ftps",
+	6620:  "kftp-data",
+	6621:  "kftp",
+	6622:  "mcftp",
+	6623:  "ktelnet",
+	6626:  "wago-service",
+	6627:  "nexgen",
+	6628:  "afesc-mc",
+	6629:  "nexgen-aux",
+	6633:  "cisco-vpath-tun",
+	6634:  "mpls-pm",
+	6635:  "mpls-udp",
+	6636:  "mpls-udp-dtls",
+	6653:  "openflow",
+	6657:  "palcom-disc",
+	6670:  "vocaltec-gold",
+	6671:  "p4p-portal",
+	6672:  "vision-server",
+	6673:  "vision-elmd",
+	6678:  "vfbp-disc",
+	6679:  "osaut",
+	6689:  "tsa",
+	6696:  "babel",
+	6701:  "kti-icad-srvr",
+	6702:  "e-design-net",
+	6703:  "e-design-web",
+	6714:  "ibprotocol",
+	6715:  "fibotrader-com",
+	6767:  "bmc-perf-agent",
+	6768:  "bmc-perf-mgrd",
+	6769:  "adi-gxp-srvprt",
+	6770:  "plysrv-http",
+	6771:  "plysrv-https",
+	6784:  "bfd-lag",
+	6785:  "dgpf-exchg",
+	6786:  "smc-jmx",
+	6787:  "smc-admin",
+	6788:  "smc-http",
+	6790:  "hnmp",
+	6791:  "hnm",
+	6801:  "acnet",
+	6831:  "ambit-lm",
+	6841:  "netmo-default",
+	6842:  "netmo-http",
+	6850:  "iccrushmore",
+	6868:  "acctopus-st",
+	6888:  "muse",
+	6935:  "ethoscan",
+	6936:  "xsmsvc",
+	6946:  "bioserver",
+	6951:  "otlp",
+	6961:  "jmact3",
+	6962:  "jmevt2",
+	6963:  "swismgr1",
+	6964:  "swismgr2",
+	6965:  "swistrap",
+	6966:  "swispol",
+	6969:  "acmsoda",
+	6997:  "MobilitySrv",
+	6998:  "iatp-highpri",
+	6999:  "iatp-normalpri",
+	7000:  "afs3-fileserver",
+	7001:  "afs3-callback",
+	7002:  "afs3-prserver",
+	7003:  "afs3-vlserver",
+	7004:  "afs3-kaserver",
+	7005:  "afs3-volser",
+	7006:  "afs3-errors",
+	7007:  "afs3-bos",
+	7008:  "afs3-update",
+	7009:  "afs3-rmtsys",
+	7010:  "ups-onlinet",
+	7011:  "talon-disc",
+	7012:  "talon-engine",
+	7013:  "microtalon-dis",
+	7014:  "microtalon-com",
+	7015:  "talon-webserver",
+	7016:  "spg",
+	7017:  "grasp",
+	7019:  "doceri-view",
+	7020:  "dpserve",
+	7021:  "dpserveadmin",
+	7022:  "ctdp",
+	7023:  "ct2nmcs",
+	7024:  "vmsvc",
+	7025:  "vmsvc-2",
+	7030:  "op-probe",
+	7040:  "quest-disc",
+	7070:  "arcp",
+	7071:  "iwg1",
+	7080:  "empowerid",
+	7088:  "zixi-transport",
+	7095:  "jdp-disc",
+	7099:  "lazy-ptop",
+	7100:  "font-service",
+	7101:  "elcn",
+	7107:  "aes-x170",
+	7121:  "virprot-lm",
+	7128:  "scenidm",
+	7129:  "scenccs",
+	7161:  "cabsm-comm",
+	7162:  "caistoragemgr",
+	7163:  "cacsambroker",
+	7164:  "fsr",
+	7165:  "doc-server",
+	7166:  "aruba-server",
+	7169:  "ccag-pib",
+	7170:  "nsrp",
+	7171:  "drm-production",
+	7174:  "clutild",
+	7181:  "janus-disc",
+	7200:  "fodms",
+	7201:  "dlip",
+	7227:  "ramp",
+	7235:  "aspcoordination",
+	7244:  "frc-hicp-disc",
+	7262:  "cnap",
+	7272:  "watchme-7272",
+	7273:  "oma-rlp",
+	7274:  "oma-rlp-s",
+	7275:  "oma-ulp",
+	7276:  "oma-ilp",
+	7277:  "oma-ilp-s",
+	7278:  "oma-dcdocbs",
+	7279:  "ctxlic",
+	7280:  "itactionserver1",
+	7281:  "itactionserver2",
+	7282:  "mzca-alert",
+	7365:  "lcm-server",
+	7391:  "mindfilesys",
+	7392:  "mrssrendezvous",
+	7393:  "nfoldman",
+	7394:  "fse",
+	7395:  "winqedit",
+	7397:  "hexarc",
+	7400:  "rtps-discovery",
+	7401:  "rtps-dd-ut",
+	7402:  "rtps-dd-mt",
+	7410:  "ionixnetmon",
+	7411:  "daqstream",
+	7421:  "mtportmon",
+	7426:  "pmdmgr",
+	7427:  "oveadmgr",
+	7428:  "ovladmgr",
+	7429:  "opi-sock",
+	7430:  "xmpv7",
+	7431:  "pmd",
+	7437:  "faximum",
+	7443:  "oracleas-https",
+	7473:  "rise",
+	7491:  "telops-lmd",
+	7500:  "silhouette",
+	7501:  "ovbus",
+	7510:  "ovhpas",
+	7511:  "pafec-lm",
+	7542:  "saratoga",
+	7543:  "atul",
+	7544:  "nta-ds",
+	7545:  "nta-us",
+	7546:  "cfs",
+	7547:  "cwmp",
+	7548:  "tidp",
+	7549:  "nls-tl",
+	7550:  "cloudsignaling",
+	7560:  "sncp",
+	7566:  "vsi-omega",
+	7570:  "aries-kfinder",
+	7574:  "coherence-disc",
+	7588:  "sun-lm",
+	7606:  "mipi-debug",
+	7624:  "indi",
+	7627:  "soap-http",
+	7628:  "zen-pawn",
+	7629:  "xdas",
+	7633:  "pmdfmgt",
+	7648:  "cuseeme",
+	7674:  "imqtunnels",
+	7675:  "imqtunnel",
+	7676:  "imqbrokerd",
+	7677:  "sun-user-https",
+	7680:  "pando-pub",
+	7689:  "collaber",
+	7697:  "klio",
+	7707:  "sync-em7",
+	7708:  "scinet",
+	7720:  "medimageportal",
+	7724:  "nsdeepfreezectl",
+	7725:  "nitrogen",
+	7726:  "freezexservice",
+	7727:  "trident-data",
+	7728:  "osvr",
+	7734:  "smip",
+	7738:  "aiagent",
+	7741:  "scriptview",
+	7743:  "sstp-1",
+	7744:  "raqmon-pdu",
+	7747:  "prgp",
+	7777:  "cbt",
+	7778:  "interwise",
+	7779:  "vstat",
+	7781:  "accu-lmgr",
+	7784:  "s-bfd",
+	7786:  "minivend",
+	7787:  "popup-reminders",
+	7789:  "office-tools",
+	7794:  "q3ade",
+	7797:  "pnet-conn",
+	7798:  "pnet-enc",
+	7799:  "altbsdp",
+	7800:  "asr",
+	7801:  "ssp-client",
+	7802:  "vns-tp",
+	7810:  "rbt-wanopt",
+	7845:  "apc-7845",
+	7846:  "apc-7846",
+	7872:  "mipv6tls",
+	7880:  "pss",
+	7887:  "ubroker",
+	7900:  "mevent",
+	7901:  "tnos-sp",
+	7902:  "tnos-dp",
+	7903:  "tnos-dps",
+	7913:  "qo-secure",
+	7932:  "t2-drm",
+	7933:  "t2-brm",
+	7962:  "generalsync",
+	7967:  "supercell",
+	7979:  "micromuse-ncps",
+	7980:  "quest-vista",
+	7982:  "sossd-disc",
+	7998:  "usicontentpush",
+	7999:  "irdmi2",
+	8000:  "irdmi",
+	8001:  "vcom-tunnel",
+	8002:  "teradataordbms",
+	8003:  "mcreport",
+	8005:  "mxi",
+	8006:  "wpl-disc",
+	8007:  "warppipe",
+	8008:  "http-alt",
+	8019:  "qbdb",
+	8020:  "intu-ec-svcdisc",
+	8021:  "intu-ec-client",
+	8022:  "oa-system",
+	8025:  "ca-audit-da",
+	8026:  "ca-audit-ds",
+	8032:  "pro-ed",
+	8033:  "mindprint",
+	8034:  "vantronix-mgmt",
+	8040:  "ampify",
+	8041:  "enguity-xccetp",
+	8052:  "senomix01",
+	8053:  "senomix02",
+	8054:  "senomix03",
+	8055:  "senomix04",
+	8056:  "senomix05",
+	8057:  "senomix06",
+	8058:  "senomix07",
+	8059:  "senomix08",
+	8060:  "aero",
+	8074:  "gadugadu",
+	8080:  "http-alt",
+	8081:  "sunproxyadmin",
+	8082:  "us-cli",
+	8083:  "us-srv",
+	8086:  "d-s-n",
+	8087:  "simplifymedia",
+	8088:  "radan-http",
+	8097:  "sac",
+	8100:  "xprint-server",
+	8115:  "mtl8000-matrix",
+	8116:  "cp-cluster",
+	8118:  "privoxy",
+	8121:  "apollo-data",
+	8122:  "apollo-admin",
+	8128:  "paycash-online",
+	8129:  "paycash-wbp",
+	8130:  "indigo-vrmi",
+	8131:  "indigo-vbcp",
+	8132:  "dbabble",
+	8148:  "isdd",
+	8149:  "eor-game",
+	8160:  "patrol",
+	8161:  "patrol-snmp",
+	8182:  "vmware-fdm",
+	8184:  "itach",
+	8192:  "spytechphone",
+	8194:  "blp1",
+	8195:  "blp2",
+	8199:  "vvr-data",
+	8200:  "trivnet1",
+	8201:  "trivnet2",
+	8202:  "aesop",
+	8204:  "lm-perfworks",
+	8205:  "lm-instmgr",
+	8206:  "lm-dta",
+	8207:  "lm-sserver",
+	8208:  "lm-webwatcher",
+	8230:  "rexecj",
+	8231:  "hncp-udp-port",
+	8232:  "hncp-dtls-port",
+	8243:  "synapse-nhttps",
+	8276:  "pando-sec",
+	8280:  "synapse-nhttp",
+	8282:  "libelle-disc",
+	8292:  "blp3",
+	8294:  "blp4",
+	8300:  "tmi",
+	8301:  "amberon",
+	8320:  "tnp-discover",
+	8321:  "tnp",
+	8322:  "garmin-marine",
+	8351:  "server-find",
+	8376:  "cruise-enum",
+	8377:  "cruise-swroute",
+	8378:  "cruise-config",
+	8379:  "cruise-diags",
+	8380:  "cruise-update",
+	8383:  "m2mservices",
+	8384:  "marathontp",
+	8400:  "cvd",
+	8401:  "sabarsd",
+	8402:  "abarsd",
+	8403:  "admind",
+	8416:  "espeech",
+	8417:  "espeech-rtp",
+	8442:  "cybro-a-bus",
+	8443:  "pcsync-https",
+	8444:  "pcsync-http",
+	8445:  "copy-disc",
+	8450:  "npmp",
+	8472:  "otv",
+	8473:  "vp2p",
+	8474:  "noteshare",
+	8500:  "fmtp",
+	8501:  "cmtp-av",
+	8503:  "lsp-self-ping",
+	8554:  "rtsp-alt",
+	8555:  "d-fence",
+	8567:  "dof-tunnel",
+	8600:  "asterix",
+	8609:  "canon-cpp-disc",
+	8610:  "canon-mfnp",
+	8611:  "canon-bjnp1",
+	8612:  "canon-bjnp2",
+	8613:  "canon-bjnp3",
+	8614:  "canon-bjnp4",
+	8675:  "msi-cps-rm-disc",
+	8686:  "sun-as-jmxrmi",
+	8732:  "dtp-net",
+	8733:  "ibus",
+	8763:  "mc-appserver",
+	8764:  "openqueue",
+	8765:  "ultraseek-http",
+	8766:  "amcs",
+	8770:  "dpap",
+	8786:  "msgclnt",
+	8787:  "msgsrvr",
+	8793:  "acd-pm",
+	8800:  "sunwebadmin",
+	8804:  "truecm",
+	8805:  "pfcp",
+	8808:  "ssports-bcast",
+	8873:  "dxspider",
+	8880:  "cddbp-alt",
+	8883:  "secure-mqtt",
+	8888:  "ddi-udp-1",
+	8889:  "ddi-udp-2",
+	8890:  "ddi-udp-3",
+	8891:  "ddi-udp-4",
+	8892:  "ddi-udp-5",
+	8893:  "ddi-udp-6",
+	8894:  "ddi-udp-7",
+	8899:  "ospf-lite",
+	8900:  "jmb-cds1",
+	8901:  "jmb-cds2",
+	8910:  "manyone-http",
+	8911:  "manyone-xml",
+	8912:  "wcbackup",
+	8913:  "dragonfly",
+	8954:  "cumulus-admin",
+	8980:  "nod-provider",
+	8981:  "nod-client",
+	8989:  "sunwebadmins",
+	8990:  "http-wmap",
+	8991:  "https-wmap",
+	8999:  "bctp",
+	9000:  "cslistener",
+	9001:  "etlservicemgr",
+	9002:  "dynamid",
+	9007:  "ogs-client",
+	9009:  "pichat",
+	9020:  "tambora",
+	9021:  "panagolin-ident",
+	9022:  "paragent",
+	9023:  "swa-1",
+	9024:  "swa-2",
+	9025:  "swa-3",
+	9026:  "swa-4",
+	9060:  "CardWeb-RT",
+	9080:  "glrpc",
+	9084:  "aurora",
+	9085:  "ibm-rsyscon",
+	9086:  "net2display",
+	9087:  "classic",
+	9088:  "sqlexec",
+	9089:  "sqlexec-ssl",
+	9090:  "websm",
+	9091:  "xmltec-xmlmail",
+	9092:  "XmlIpcRegSvc",
+	9100:  "hp-pdl-datastr",
+	9101:  "bacula-dir",
+	9102:  "bacula-fd",
+	9103:  "bacula-sd",
+	9104:  "peerwire",
+	9105:  "xadmin",
+	9106:  "astergate-disc",
+	9119:  "mxit",
+	9131:  "dddp",
+	9160:  "apani1",
+	9161:  "apani2",
+	9162:  "apani3",
+	9163:  "apani4",
+	9164:  "apani5",
+	9191:  "sun-as-jpda",
+	9200:  "wap-wsp",
+	9201:  "wap-wsp-wtp",
+	9202:  "wap-wsp-s",
+	9203:  "wap-wsp-wtp-s",
+	9204:  "wap-vcard",
+	9205:  "wap-vcal",
+	9206:  "wap-vcard-s",
+	9207:  "wap-vcal-s",
+	9208:  "rjcdb-vcards",
+	9209:  "almobile-system",
+	9210:  "oma-mlp",
+	9211:  "oma-mlp-s",
+	9212:  "serverviewdbms",
+	9213:  "serverstart",
+	9214:  "ipdcesgbs",
+	9215:  "insis",
+	9216:  "acme",
+	9217:  "fsc-port",
+	9222:  "teamcoherence",
+	9255:  "mon",
+	9277:  "traingpsdata",
+	9278:  "pegasus",
+	9279:  "pegasus-ctl",
+	9280:  "pgps",
+	9281:  "swtp-port1",
+	9282:  "swtp-port2",
+	9283:  "callwaveiam",
+	9284:  "visd",
+	9285:  "n2h2server",
+	9286:  "n2receive",
+	9287:  "cumulus",
+	9292:  "armtechdaemon",
+	9293:  "storview",
+	9294:  "armcenterhttp",
+	9295:  "armcenterhttps",
+	9300:  "vrace",
+	9318:  "secure-ts",
+	9321:  "guibase",
+	9343:  "mpidcmgr",
+	9344:  "mphlpdmc",
+	9346:  "ctechlicensing",
+	9374:  "fjdmimgr",
+	9380:  "boxp",
+	9396:  "fjinvmgr",
+	9397:  "mpidcagt",
+	9400:  "sec-t4net-srv",
+	9401:  "sec-t4net-clt",
+	9402:  "sec-pc2fax-srv",
+	9418:  "git",
+	9443:  "tungsten-https",
+	9444:  "wso2esb-console",
+	9450:  "sntlkeyssrvr",
+	9500:  "ismserver",
+	9522:  "sma-spw",
+	9535:  "mngsuite",
+	9536:  "laes-bf",
+	9555:  "trispen-sra",
+	9592:  "ldgateway",
+	9593:  "cba8",
+	9594:  "msgsys",
+	9595:  "pds",
+	9596:  "mercury-disc",
+	9597:  "pd-admin",
+	9598:  "vscp",
+	9599:  "robix",
+	9600:  "micromuse-ncpw",
+	9612:  "streamcomm-ds",
+	9618:  "condor",
+	9628:  "odbcpathway",
+	9629:  "uniport",
+	9632:  "mc-comm",
+	9667:  "xmms2",
+	9668:  "tec5-sdctp",
+	9694:  "client-wakeup",
+	9695:  "ccnx",
+	9700:  "board-roar",
+	9747:  "l5nas-parchan",
+	9750:  "board-voip",
+	9753:  "rasadv",
+	9762:  "tungsten-http",
+	9800:  "davsrc",
+	9801:  "sstp-2",
+	9802:  "davsrcs",
+	9875:  "sapv1",
+	9878:  "kca-service",
+	9888:  "cyborg-systems",
+	9889:  "gt-proxy",
+	9898:  "monkeycom",
+	9899:  "sctp-tunneling",
+	9900:  "iua",
+	9901:  "enrp",
+	9903:  "multicast-ping",
+	9909:  "domaintime",
+	9911:  "sype-transport",
+	9950:  "apc-9950",
+	9951:  "apc-9951",
+	9952:  "apc-9952",
+	9953:  "acis",
+	9955:  "alljoyn-mcm",
+	9956:  "alljoyn",
+	9966:  "odnsp",
+	9987:  "dsm-scm-target",
+	9990:  "osm-appsrvr",
+	9991:  "osm-oev",
+	9992:  "palace-1",
+	9993:  "palace-2",
+	9994:  "palace-3",
+	9995:  "palace-4",
+	9996:  "palace-5",
+	9997:  "palace-6",
+	9998:  "distinct32",
+	9999:  "distinct",
+	10000: "ndmp",
+	10001: "scp-config",
+	10002: "documentum",
+	10003: "documentum-s",
+	10007: "mvs-capacity",
+	10008: "octopus",
+	10009: "swdtp-sv",
+	10050: "zabbix-agent",
+	10051: "zabbix-trapper",
+	10080: "amanda",
+	10081: "famdc",
+	10100: "itap-ddtp",
+	10101: "ezmeeting-2",
+	10102: "ezproxy-2",
+	10103: "ezrelay",
+	10104: "swdtp",
+	10107: "bctp-server",
+	10110: "nmea-0183",
+	10111: "nmea-onenet",
+	10113: "netiq-endpoint",
+	10114: "netiq-qcheck",
+	10115: "netiq-endpt",
+	10116: "netiq-voipa",
+	10117: "iqrm",
+	10128: "bmc-perf-sd",
+	10160: "qb-db-server",
+	10161: "snmpdtls",
+	10162: "snmpdtls-trap",
+	10200: "trisoap",
+	10201: "rscs",
+	10252: "apollo-relay",
+	10253: "eapol-relay",
+	10260: "axis-wimp-port",
+	10288: "blocks",
+	10439: "bngsync",
+	10500: "hip-nat-t",
+	10540: "MOS-lower",
+	10541: "MOS-upper",
+	10542: "MOS-aux",
+	10543: "MOS-soap",
+	10544: "MOS-soap-opt",
+	10800: "gap",
+	10805: "lpdg",
+	10810: "nmc-disc",
+	10860: "helix",
+	10880: "bveapi",
+	10990: "rmiaux",
+	11000: "irisa",
+	11001: "metasys",
+	10023: "cefd-vmp",
+	11095: "weave",
+	11106: "sgi-lk",
+	11108: "myq-termlink",
+	11111: "vce",
+	11112: "dicom",
+	11161: "suncacao-snmp",
+	11162: "suncacao-jmxmp",
+	11163: "suncacao-rmi",
+	11164: "suncacao-csa",
+	11165: "suncacao-websvc",
+	11171: "snss",
+	11201: "smsqp",
+	11208: "wifree",
+	11211: "memcache",
+	11319: "imip",
+	11320: "imip-channels",
+	11321: "arena-server",
+	11367: "atm-uhas",
+	11371: "hkp",
+	11430: "lsdp",
+	11600: "tempest-port",
+	11720: "h323callsigalt",
+	11723: "emc-xsw-dcache",
+	11751: "intrepid-ssl",
+	11796: "lanschool-mpt",
+	11876: "xoraya",
+	11877: "x2e-disc",
+	11967: "sysinfo-sp",
+	12000: "entextxid",
+	12001: "entextnetwk",
+	12002: "entexthigh",
+	12003: "entextmed",
+	12004: "entextlow",
+	12005: "dbisamserver1",
+	12006: "dbisamserver2",
+	12007: "accuracer",
+	12008: "accuracer-dbms",
+	12009: "ghvpn",
+	12012: "vipera",
+	12013: "vipera-ssl",
+	12109: "rets-ssl",
+	12121: "nupaper-ss",
+	12168: "cawas",
+	12172: "hivep",
+	12300: "linogridengine",
+	12321: "warehouse-sss",
+	12322: "warehouse",
+	12345: "italk",
+	12753: "tsaf",
+	13160: "i-zipqd",
+	13216: "bcslogc",
+	13217: "rs-pias",
+	13218: "emc-vcas-udp",
+	13223: "powwow-client",
+	13224: "powwow-server",
+	13400: "doip-disc",
+	13720: "bprd",
+	13721: "bpdbm",
+	13722: "bpjava-msvc",
+	13724: "vnetd",
+	13782: "bpcd",
+	13783: "vopied",
+	13785: "nbdb",
+	13786: "nomdb",
+	13818: "dsmcc-config",
+	13819: "dsmcc-session",
+	13820: "dsmcc-passthru",
+	13821: "dsmcc-download",
+	13822: "dsmcc-ccp",
+	13894: "ucontrol",
+	13929: "dta-systems",
+	14000: "scotty-ft",
+	14001: "sua",
+	14002: "scotty-disc",
+	14033: "sage-best-com1",
+	14034: "sage-best-com2",
+	14141: "vcs-app",
+	14142: "icpp",
+	14145: "gcm-app",
+	14149: "vrts-tdd",
+	14154: "vad",
+	14250: "cps",
+	14414: "ca-web-update",
+	14936: "hde-lcesrvr-1",
+	14937: "hde-lcesrvr-2",
+	15000: "hydap",
+	15118: "v2g-secc",
+	15345: "xpilot",
+	15363: "3link",
+	15555: "cisco-snat",
+	15660: "bex-xr",
+	15740: "ptp",
+	15998: "2ping",
+	16003: "alfin",
+	16161: "sun-sea-port",
+	16309: "etb4j",
+	16310: "pduncs",
+	16311: "pdefmns",
+	16360: "netserialext1",
+	16361: "netserialext2",
+	16367: "netserialext3",
+	16368: "netserialext4",
+	16384: "connected",
+	16666: "vtp",
+	16900: "newbay-snc-mc",
+	16950: "sgcip",
+	16991: "intel-rci-mp",
+	16992: "amt-soap-http",
+	16993: "amt-soap-https",
+	16994: "amt-redir-tcp",
+	16995: "amt-redir-tls",
+	17007: "isode-dua",
+	17185: "soundsvirtual",
+	17219: "chipper",
+	17220: "avtp",
+	17221: "avdecc",
+	17222: "cpsp",
+	17224: "trdp-pd",
+	17225: "trdp-md",
+	17234: "integrius-stp",
+	17235: "ssh-mgmt",
+	17500: "db-lsp-disc",
+	17729: "ea",
+	17754: "zep",
+	17755: "zigbee-ip",
+	17756: "zigbee-ips",
+	18000: "biimenu",
+	18181: "opsec-cvp",
+	18182: "opsec-ufp",
+	18183: "opsec-sam",
+	18184: "opsec-lea",
+	18185: "opsec-omi",
+	18186: "ohsc",
+	18187: "opsec-ela",
+	18241: "checkpoint-rtm",
+	18262: "gv-pf",
+	18463: "ac-cluster",
+	18634: "rds-ib",
+	18635: "rds-ip",
+	18668: "vdmmesh-disc",
+	18769: "ique",
+	18881: "infotos",
+	18888: "apc-necmp",
+	19000: "igrid",
+	19007: "scintilla",
+	19191: "opsec-uaa",
+	19194: "ua-secureagent",
+	19220: "cora-disc",
+	19283: "keysrvr",
+	19315: "keyshadow",
+	19398: "mtrgtrans",
+	19410: "hp-sco",
+	19411: "hp-sca",
+	19412: "hp-sessmon",
+	19539: "fxuptp",
+	19540: "sxuptp",
+	19541: "jcp",
+	19788: "mle",
+	19999: "dnp-sec",
+	20000: "dnp",
+	20001: "microsan",
+	20002: "commtact-http",
+	20003: "commtact-https",
+	20005: "openwebnet",
+	20012: "ss-idi-disc",
+	20014: "opendeploy",
+	20034: "nburn-id",
+	20046: "tmophl7mts",
+	20048: "mountd",
+	20049: "nfsrdma",
+	20167: "tolfab",
+	20202: "ipdtp-port",
+	20222: "ipulse-ics",
+	20480: "emwavemsg",
+	20670: "track",
+	20999: "athand-mmp",
+	21000: "irtrans",
+	21554: "dfserver",
+	21590: "vofr-gateway",
+	21800: "tvpm",
+	21845: "webphone",
+	21846: "netspeak-is",
+	21847: "netspeak-cs",
+	21848: "netspeak-acd",
+	21849: "netspeak-cps",
+	22000: "snapenetio",
+	22001: "optocontrol",
+	22002: "optohost002",
+	22003: "optohost003",
+	22004: "optohost004",
+	22005: "optohost004",
+	22273: "wnn6",
+	22305: "cis",
+	22335: "shrewd-stream",
+	22343: "cis-secure",
+	22347: "wibukey",
+	22350: "codemeter",
+	22555: "vocaltec-phone",
+	22763: "talikaserver",
+	22800: "aws-brf",
+	22951: "brf-gw",
+	23000: "inovaport1",
+	23001: "inovaport2",
+	23002: "inovaport3",
+	23003: "inovaport4",
+	23004: "inovaport5",
+	23005: "inovaport6",
+	23272: "s102",
+	23294: "5afe-disc",
+	23333: "elxmgmt",
+	23400: "novar-dbase",
+	23401: "novar-alarm",
+	23402: "novar-global",
+	24000: "med-ltp",
+	24001: "med-fsp-rx",
+	24002: "med-fsp-tx",
+	24003: "med-supp",
+	24004: "med-ovw",
+	24005: "med-ci",
+	24006: "med-net-svc",
+	24242: "filesphere",
+	24249: "vista-4gl",
+	24321: "ild",
+	24322: "hid",
+	24386: "intel-rci",
+	24465: "tonidods",
+	24554: "binkp",
+	24577: "bilobit-update",
+	24676: "canditv",
+	24677: "flashfiler",
+	24678: "proactivate",
+	24680: "tcc-http",
+	24850: "assoc-disc",
+	24922: "find",
+	25000: "icl-twobase1",
+	25001: "icl-twobase2",
+	25002: "icl-twobase3",
+	25003: "icl-twobase4",
+	25004: "icl-twobase5",
+	25005: "icl-twobase6",
+	25006: "icl-twobase7",
+	25007: "icl-twobase8",
+	25008: "icl-twobase9",
+	25009: "icl-twobase10",
+	25793: "vocaltec-hos",
+	25900: "tasp-net",
+	25901: "niobserver",
+	25902: "nilinkanalyst",
+	25903: "niprobe",
+	25954: "bf-game",
+	25955: "bf-master",
+	26000: "quake",
+	26133: "scscp",
+	26208: "wnn6-ds",
+	26260: "ezproxy",
+	26261: "ezmeeting",
+	26262: "k3software-svr",
+	26263: "k3software-cli",
+	26486: "exoline-udp",
+	26487: "exoconfig",
+	26489: "exonet",
+	27345: "imagepump",
+	27442: "jesmsjc",
+	27504: "kopek-httphead",
+	27782: "ars-vista",
+	27999: "tw-auth-key",
+	28000: "nxlmd",
+	28119: "a27-ran-ran",
+	28200: "voxelstorm",
+	28240: "siemensgsm",
+	29167: "otmp",
+	30001: "pago-services1",
+	30002: "pago-services2",
+	30003: "amicon-fpsu-ra",
+	30004: "amicon-fpsu-s",
+	30260: "kingdomsonline",
+	30832: "samsung-disc",
+	30999: "ovobs",
+	31016: "ka-kdp",
+	31029: "yawn",
+	31416: "xqosd",
+	31457: "tetrinet",
+	31620: "lm-mon",
+	31765: "gamesmith-port",
+	31948: "iceedcp-tx",
+	31949: "iceedcp-rx",
+	32034: "iracinghelper",
+	32249: "t1distproc60",
+	32483: "apm-link",
+	32635: "sec-ntb-clnt",
+	32636: "DMExpress",
+	32767: "filenet-powsrm",
+	32768: "filenet-tms",
+	32769: "filenet-rpc",
+	32770: "filenet-nch",
+	32771: "filenet-rmi",
+	32772: "filenet-pa",
+	32773: "filenet-cm",
+	32774: "filenet-re",
+	32775: "filenet-pch",
+	32776: "filenet-peior",
+	32777: "filenet-obrok",
+	32801: "mlsn",
+	32896: "idmgratm",
+	33123: "aurora-balaena",
+	33331: "diamondport",
+	33334: "speedtrace-disc",
+	33434: "traceroute",
+	33656: "snip-slave",
+	34249: "turbonote-2",
+	34378: "p-net-local",
+	34379: "p-net-remote",
+	34567: "edi_service",
+	34962: "profinet-rt",
+	34963: "profinet-rtm",
+	34964: "profinet-cm",
+	34980: "ethercat",
+	35001: "rt-viewer",
+	35004: "rt-classmanager",
+	35100: "axio-disc",
+	35355: "altova-lm-disc",
+	36001: "allpeers",
+	36411: "wlcp",
+	36865: "kastenxpipe",
+	37475: "neckar",
+	37654: "unisys-eportal",
+	38002: "crescoctrl-disc",
+	38201: "galaxy7-data",
+	38202: "fairview",
+	38203: "agpolicy",
+	39681: "turbonote-1",
+	40000: "safetynetp",
+	40023: "k-patentssensor",
+	40841: "cscp",
+	40842: "csccredir",
+	40843: "csccfirewall",
+	40853: "ortec-disc",
+	41111: "fs-qos",
+	41230: "z-wave-s",
+	41794: "crestron-cip",
+	41795: "crestron-ctp",
+	42508: "candp",
+	42509: "candrp",
+	42510: "caerpc",
+	43000: "recvr-rc-disc",
+	43188: "reachout",
+	43189: "ndm-agent-port",
+	43190: "ip-provision",
+	43210: "shaperai-disc",
+	43439: "eq3-config",
+	43440: "ew-disc-cmd",
+	43441: "ciscocsdb",
+	44321: "pmcd",
+	44322: "pmcdproxy",
+	44544: "domiq",
+	44553: "rbr-debug",
+	44600: "asihpi",
+	44818: "EtherNet-IP-2",
+	44900: "m3da-disc",
+	45000: "asmp-mon",
+	45054: "invision-ag",
+	45514: "cloudcheck-ping",
+	45678: "eba",
+	45825: "qdb2service",
+	45966: "ssr-servermgr",
+	46999: "mediabox",
+	47000: "mbus",
+	47100: "jvl-mactalk",
+	47557: "dbbrowse",
+	47624: "directplaysrvr",
+	47806: "ap",
+	47808: "bacnet",
+	47809: "presonus-ucnet",
+	48000: "nimcontroller",
+	48001: "nimspooler",
+	48002: "nimhub",
+	48003: "nimgtw",
+	48128: "isnetserv",
+	48129: "blp5",
+	48556: "com-bardac-dw",
+	48619: "iqobject",
+	48653: "robotraconteur",
+	49001: "nusdp-disc",
+}
+var sctpPortNames = map[SCTPPort]string{
+	9:     "discard",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	80:    "http",
+	179:   "bgp",
+	443:   "https",
+	1021:  "exp1",
+	1022:  "exp2",
+	1167:  "cisco-ipsla",
+	1720:  "h323hostcall",
+	2049:  "nfs",
+	2225:  "rcip-itu",
+	2904:  "m2ua",
+	2905:  "m3ua",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	3097:  "itu-bicc-stc",
+	3565:  "m2pa",
+	3863:  "asap-sctp",
+	3864:  "asap-sctp-tls",
+	3868:  "diameter",
+	4333:  "ahsp",
+	4502:  "a25-fap-fgw",
+	4711:  "trinity-dist",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	5060:  "sip",
+	5061:  "sips",
+	5090:  "car",
+	5091:  "cxtp",
+	5215:  "noteza",
+	5445:  "smbdirect",
+	5672:  "amqp",
+	5675:  "v5ua",
+	5868:  "diameters",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	6704:  "frc-hp",
+	6705:  "frc-mp",
+	6706:  "frc-lp",
+	6970:  "conductor-mpx",
+	7626:  "simco",
+	7701:  "nfapi",
+	7728:  "osvr",
+	8471:  "pim-port",
+	9082:  "lcs-ap",
+	9084:  "aurora",
+	9900:  "iua",
+	9901:  "enrp-sctp",
+	9902:  "enrp-sctp-tls",
+	11997: "wmereceiving",
+	11998: "wmedistribution",
+	11999: "wmereporting",
+	14001: "sua",
+	20049: "nfsrdma",
+	25471: "rna",
+	29118: "sgsap",
+	29168: "sbcap",
+	29169: "iuhsctpassoc",
+	30100: "rwp",
+	36412: "s1-control",
+	36422: "x2-control",
+	36423: "slmap",
+	36424: "nq-ap",
+	36443: "m2ap",
+	36444: "m3ap",
+	36462: "xw-control",
+	38412: "ng-control",
+	38422: "xn-control",
+	38472: "f1-control",
+}
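The generated tables above are plain map lookups keyed by port number; in the layers package they back the String() methods of the corresponding port types (UDPPort, SCTPPort). A minimal, self-contained sketch of that lookup-with-fallback pattern, using a small hypothetical table rather than the vendored one:

```go
package main

import (
	"fmt"
	"strconv"
)

// portNames is a tiny stand-in for the generated tables above.
var portNames = map[uint16]string{
	53:   "domain",
	443:  "https",
	4739: "ipfix",
}

// portString returns the registered name when the port is known,
// otherwise just the decimal number.
func portString(p uint16) string {
	if name, ok := portNames[p]; ok {
		return fmt.Sprintf("%d(%s)", p, name)
	}
	return strconv.Itoa(int(p))
}

func main() {
	fmt.Println(portString(443))  // 443(https)
	fmt.Println(portString(4444)) // 4444
}
```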
diff --git a/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/google/gopacket/layers/icmp4.go
new file mode 100644
index 0000000..bd3f03f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp4.go
@@ -0,0 +1,267 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	ICMPv4TypeEchoReply              = 0
+	ICMPv4TypeDestinationUnreachable = 3
+	ICMPv4TypeSourceQuench           = 4
+	ICMPv4TypeRedirect               = 5
+	ICMPv4TypeEchoRequest            = 8
+	ICMPv4TypeRouterAdvertisement    = 9
+	ICMPv4TypeRouterSolicitation     = 10
+	ICMPv4TypeTimeExceeded           = 11
+	ICMPv4TypeParameterProblem       = 12
+	ICMPv4TypeTimestampRequest       = 13
+	ICMPv4TypeTimestampReply         = 14
+	ICMPv4TypeInfoRequest            = 15
+	ICMPv4TypeInfoReply              = 16
+	ICMPv4TypeAddressMaskRequest     = 17
+	ICMPv4TypeAddressMaskReply       = 18
+)
+
+const (
+	// DestinationUnreachable
+	ICMPv4CodeNet                 = 0
+	ICMPv4CodeHost                = 1
+	ICMPv4CodeProtocol            = 2
+	ICMPv4CodePort                = 3
+	ICMPv4CodeFragmentationNeeded = 4
+	ICMPv4CodeSourceRoutingFailed = 5
+	ICMPv4CodeNetUnknown          = 6
+	ICMPv4CodeHostUnknown         = 7
+	ICMPv4CodeSourceIsolated      = 8
+	ICMPv4CodeNetAdminProhibited  = 9
+	ICMPv4CodeHostAdminProhibited = 10
+	ICMPv4CodeNetTOS              = 11
+	ICMPv4CodeHostTOS             = 12
+	ICMPv4CodeCommAdminProhibited = 13
+	ICMPv4CodeHostPrecedence      = 14
+	ICMPv4CodePrecedenceCutoff    = 15
+
+	// TimeExceeded
+	ICMPv4CodeTTLExceeded                    = 0
+	ICMPv4CodeFragmentReassemblyTimeExceeded = 1
+
+	// ParameterProblem
+	ICMPv4CodePointerIndicatesError = 0
+	ICMPv4CodeMissingOption         = 1
+	ICMPv4CodeBadLength             = 2
+
+	// Redirect
+	// ICMPv4CodeNet  = same as for DestinationUnreachable
+	// ICMPv4CodeHost = same as for DestinationUnreachable
+	ICMPv4CodeTOSNet  = 2
+	ICMPv4CodeTOSHost = 3
+)
+
+type icmpv4TypeCodeInfoStruct struct {
+	typeStr string
+	codeStr *map[uint8]string
+}
+
+var (
+	icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{
+		ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{
+			"DestinationUnreachable", &map[uint8]string{
+				ICMPv4CodeNet:                 "Net",
+				ICMPv4CodeHost:                "Host",
+				ICMPv4CodeProtocol:            "Protocol",
+				ICMPv4CodePort:                "Port",
+				ICMPv4CodeFragmentationNeeded: "FragmentationNeeded",
+				ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed",
+				ICMPv4CodeNetUnknown:          "NetUnknown",
+				ICMPv4CodeHostUnknown:         "HostUnknown",
+				ICMPv4CodeSourceIsolated:      "SourceIsolated",
+				ICMPv4CodeNetAdminProhibited:  "NetAdminProhibited",
+				ICMPv4CodeHostAdminProhibited: "HostAdminProhibited",
+				ICMPv4CodeNetTOS:              "NetTOS",
+				ICMPv4CodeHostTOS:             "HostTOS",
+				ICMPv4CodeCommAdminProhibited: "CommAdminProhibited",
+				ICMPv4CodeHostPrecedence:      "HostPrecedence",
+				ICMPv4CodePrecedenceCutoff:    "PrecedenceCutoff",
+			},
+		},
+		ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{
+			"TimeExceeded", &map[uint8]string{
+				ICMPv4CodeTTLExceeded:                    "TTLExceeded",
+				ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+			},
+		},
+		ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{
+			"ParameterProblem", &map[uint8]string{
+				ICMPv4CodePointerIndicatesError: "PointerIndicatesError",
+				ICMPv4CodeMissingOption:         "MissingOption",
+				ICMPv4CodeBadLength:             "BadLength",
+			},
+		},
+		ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{
+			"SourceQuench", nil,
+		},
+		ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{
+			"Redirect", &map[uint8]string{
+				ICMPv4CodeNet:     "Net",
+				ICMPv4CodeHost:    "Host",
+				ICMPv4CodeTOSNet:  "TOS+Net",
+				ICMPv4CodeTOSHost: "TOS+Host",
+			},
+		},
+		ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{
+			"EchoRequest", nil,
+		},
+		ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{
+			"EchoReply", nil,
+		},
+		ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{
+			"TimestampRequest", nil,
+		},
+		ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{
+			"TimestampReply", nil,
+		},
+		ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{
+			"InfoRequest", nil,
+		},
+		ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{
+			"InfoReply", nil,
+		},
+		ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{
+			"RouterSolicitation", nil,
+		},
+		ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{
+			"RouterAdvertisement", nil,
+		},
+		ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{
+			"AddressMaskRequest", nil,
+		},
+		ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{
+			"AddressMaskReply", nil,
+		},
+	}
+)
+
+type ICMPv4TypeCode uint16
+
+// Type returns the ICMPv4 type field.
+func (a ICMPv4TypeCode) Type() uint8 {
+	return uint8(a >> 8)
+}
+
+// Code returns the ICMPv4 code field.
+func (a ICMPv4TypeCode) Code() uint8 {
+	return uint8(a)
+}
+
+func (a ICMPv4TypeCode) String() string {
+	t, c := a.Type(), a.Code()
+	strInfo, ok := icmpv4TypeCodeInfo[t]
+	if !ok {
+		// Unknown ICMPv4 type field
+		return fmt.Sprintf("%d(%d)", t, c)
+	}
+	typeStr := strInfo.typeStr
+	if strInfo.codeStr == nil && c == 0 {
+		// The ICMPv4 type does not make use of the code field
+		return typeStr
+	}
+	if strInfo.codeStr == nil && c != 0 {
+		// The ICMPv4 type does not make use of the code field, but it is present anyway
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	codeStr, ok := (*strInfo.codeStr)[c]
+	if !ok {
+		// We don't know this ICMPv4 code; print the numerical value
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv4TypeCode) GoString() string {
+	t := reflect.TypeOf(a)
+	return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer.
+func (a ICMPv4TypeCode) SerializeTo(bytes []byte) {
+	binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode
+// gopacket type from the ICMPv4 type and code values.
+func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode {
+	return ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
+// ICMPv4 is the layer for IPv4 ICMP packet data.
+type ICMPv4 struct {
+	BaseLayer
+	TypeCode ICMPv4TypeCode
+	Checksum uint16
+	Id       uint16
+	Seq      uint16
+}
+
+// LayerType returns LayerTypeICMPv4.
+func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 8 bytes for ICMPv4 packet")
+	}
+	i.TypeCode = CreateICMPv4TypeCode(data[0], data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.Id = binary.BigEndian.Uint16(data[4:6])
+	i.Seq = binary.BigEndian.Uint16(data[6:8])
+	i.BaseLayer = BaseLayer{data[:8], data[8:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	i.TypeCode.SerializeTo(bytes)
+	binary.BigEndian.PutUint16(bytes[4:], i.Id)
+	binary.BigEndian.PutUint16(bytes[6:], i.Seq)
+	if opts.ComputeChecksums {
+		bytes[2] = 0
+		bytes[3] = 0
+		i.Checksum = tcpipChecksum(b.Bytes(), 0)
+	}
+	binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv4) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv4{}
+	return decodingLayerDecoder(i, data, p)
+}
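Putting the pieces above together, an ICMPv4 message is built by packing the type/code pair and letting serialization compute the checksum. A minimal sketch using gopacket's standard serialization helpers (identifier, sequence number and payload are illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Echo request: type 8, code 0, with an arbitrary identifier/sequence pair.
	icmp := &layers.ICMPv4{
		TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
		Id:       0x1234,
		Seq:      1,
	}

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{ComputeChecksums: true}
	// SerializeLayers prepends the 8-byte ICMPv4 header to the payload;
	// ComputeChecksums makes SerializeTo fill in the checksum over the message.
	if err := gopacket.SerializeLayers(buf, opts, icmp, gopacket.Payload("ping")); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}
```

For display, the same packing drives String(): CreateICMPv4TypeCode(ICMPv4TypeDestinationUnreachable, ICMPv4CodeHost).String() renders as "DestinationUnreachable(Host)" via the table above.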
diff --git a/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/google/gopacket/layers/icmp6.go
new file mode 100644
index 0000000..09afd11
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6.go
@@ -0,0 +1,266 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// The following are from RFC 4443
+	ICMPv6TypeDestinationUnreachable = 1
+	ICMPv6TypePacketTooBig           = 2
+	ICMPv6TypeTimeExceeded           = 3
+	ICMPv6TypeParameterProblem       = 4
+	ICMPv6TypeEchoRequest            = 128
+	ICMPv6TypeEchoReply              = 129
+
+	// The following are from RFC 4861
+	ICMPv6TypeRouterSolicitation    = 133
+	ICMPv6TypeRouterAdvertisement   = 134
+	ICMPv6TypeNeighborSolicitation  = 135
+	ICMPv6TypeNeighborAdvertisement = 136
+	ICMPv6TypeRedirect              = 137
+
+	// The following are from RFC 2710
+	ICMPv6TypeMLDv1MulticastListenerQueryMessage  = 130
+	ICMPv6TypeMLDv1MulticastListenerReportMessage = 131
+	ICMPv6TypeMLDv1MulticastListenerDoneMessage   = 132
+
+	// The following are from RFC 3810
+	ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143
+)
+
+const (
+	// DestinationUnreachable
+	ICMPv6CodeNoRouteToDst           = 0
+	ICMPv6CodeAdminProhibited        = 1
+	ICMPv6CodeBeyondScopeOfSrc       = 2
+	ICMPv6CodeAddressUnreachable     = 3
+	ICMPv6CodePortUnreachable        = 4
+	ICMPv6CodeSrcAddressFailedPolicy = 5
+	ICMPv6CodeRejectRouteToDst       = 6
+
+	// TimeExceeded
+	ICMPv6CodeHopLimitExceeded               = 0
+	ICMPv6CodeFragmentReassemblyTimeExceeded = 1
+
+	// ParameterProblem
+	ICMPv6CodeErroneousHeaderField   = 0
+	ICMPv6CodeUnrecognizedNextHeader = 1
+	ICMPv6CodeUnrecognizedIPv6Option = 2
+)
+
+type icmpv6TypeCodeInfoStruct struct {
+	typeStr string
+	codeStr *map[uint8]string
+}
+
+var (
+	icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{
+		ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{
+			"DestinationUnreachable", &map[uint8]string{
+				ICMPv6CodeNoRouteToDst:           "NoRouteToDst",
+				ICMPv6CodeAdminProhibited:        "AdminProhibited",
+				ICMPv6CodeBeyondScopeOfSrc:       "BeyondScopeOfSrc",
+				ICMPv6CodeAddressUnreachable:     "AddressUnreachable",
+				ICMPv6CodePortUnreachable:        "PortUnreachable",
+				ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy",
+				ICMPv6CodeRejectRouteToDst:       "RejectRouteToDst",
+			},
+		},
+		ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{
+			"PacketTooBig", nil,
+		},
+		ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{
+			"TimeExceeded", &map[uint8]string{
+				ICMPv6CodeHopLimitExceeded:               "HopLimitExceeded",
+				ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+			},
+		},
+		ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{
+			"ParameterProblem", &map[uint8]string{
+				ICMPv6CodeErroneousHeaderField:   "ErroneousHeaderField",
+				ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader",
+				ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option",
+			},
+		},
+		ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{
+			"EchoRequest", nil,
+		},
+		ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{
+			"EchoReply", nil,
+		},
+		ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{
+			"RouterSolicitation", nil,
+		},
+		ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{
+			"RouterAdvertisement", nil,
+		},
+		ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{
+			"NeighborSolicitation", nil,
+		},
+		ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{
+			"NeighborAdvertisement", nil,
+		},
+		ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{
+			"Redirect", nil,
+		},
+	}
+)
+
+type ICMPv6TypeCode uint16
+
+// Type returns the ICMPv6 type field.
+func (a ICMPv6TypeCode) Type() uint8 {
+	return uint8(a >> 8)
+}
+
+// Code returns the ICMPv6 code field.
+func (a ICMPv6TypeCode) Code() uint8 {
+	return uint8(a)
+}
+
+func (a ICMPv6TypeCode) String() string {
+	t, c := a.Type(), a.Code()
+	strInfo, ok := icmpv6TypeCodeInfo[t]
+	if !ok {
+		// Unknown ICMPv6 type field
+		return fmt.Sprintf("%d(%d)", t, c)
+	}
+	typeStr := strInfo.typeStr
+	if strInfo.codeStr == nil && c == 0 {
+		// The ICMPv6 type does not make use of the code field
+		return typeStr
+	}
+	if strInfo.codeStr == nil && c != 0 {
+		// The ICMPv6 type does not make use of the code field, but it is present anyway
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	codeStr, ok := (*strInfo.codeStr)[c]
+	if !ok {
+		// We don't know this ICMPv6 code; print the numerical value
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv6TypeCode) GoString() string {
+	t := reflect.TypeOf(a)
+	return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer.
+func (a ICMPv6TypeCode) SerializeTo(bytes []byte) {
+	binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode
+// gopacket type from the ICMPv6 type and code values.
+func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode {
+	return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
+// ICMPv6 is the layer for IPv6 ICMP packet data
+type ICMPv6 struct {
+	BaseLayer
+	TypeCode ICMPv6TypeCode
+	Checksum uint16
+	// TypeBytes is deprecated and always nil. See the different ICMPv6 message types
+	// instead (e.g. ICMPv6TypeRouterSolicitation).
+	TypeBytes []byte
+	tcpipchecksum
+}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 packet")
+	}
+	i.TypeCode = CreateICMPv6TypeCode(data[0], data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	i.TypeCode.SerializeTo(bytes)
+
+	if opts.ComputeChecksums {
+		bytes[2] = 0
+		bytes[3] = 0
+		csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6)
+		if err != nil {
+			return err
+		}
+		i.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6) NextLayerType() gopacket.LayerType {
+	switch i.TypeCode.Type() {
+	case ICMPv6TypeEchoRequest:
+		return LayerTypeICMPv6Echo
+	case ICMPv6TypeEchoReply:
+		return LayerTypeICMPv6Echo
+	case ICMPv6TypeRouterSolicitation:
+		return LayerTypeICMPv6RouterSolicitation
+	case ICMPv6TypeRouterAdvertisement:
+		return LayerTypeICMPv6RouterAdvertisement
+	case ICMPv6TypeNeighborSolicitation:
+		return LayerTypeICMPv6NeighborSolicitation
+	case ICMPv6TypeNeighborAdvertisement:
+		return LayerTypeICMPv6NeighborAdvertisement
+	case ICMPv6TypeRedirect:
+		return LayerTypeICMPv6Redirect
+	case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // MLDv1 and MLDv2 queries share the same ICMPv6 type
+		if len(i.Payload) > 20 { // Only payload size differs
+			return LayerTypeMLDv2MulticastListenerQuery
+		} else {
+			return LayerTypeMLDv1MulticastListenerQuery
+		}
+	case ICMPv6TypeMLDv1MulticastListenerDoneMessage:
+		return LayerTypeMLDv1MulticastListenerDone
+	case ICMPv6TypeMLDv1MulticastListenerReportMessage:
+		return LayerTypeMLDv1MulticastListenerReport
+	case ICMPv6TypeMLDv2MulticastListenerReportMessageV2:
+		return LayerTypeMLDv2MulticastListenerReport
+	}
+
+	return gopacket.LayerTypePayload
+}
+
+func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6{}
+	return decodingLayerDecoder(i, data, p)
+}
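On the receive side, NextLayerType above routes a decoded ICMPv6 header to the per-message layers (echo, NDP, MLD). A short sketch of inspecting a captured IPv6 packet with gopacket's packet API (the raw bytes are assumed to come from a capture; the function name is illustrative):

```go
package icmp6example

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// inspectICMPv6 decodes a raw IPv6 packet and reports any ICMPv6 content.
func inspectICMPv6(raw []byte) {
	packet := gopacket.NewPacket(raw, layers.LayerTypeIPv6, gopacket.Default)
	l := packet.Layer(layers.LayerTypeICMPv6)
	if l == nil {
		return
	}
	icmp := l.(*layers.ICMPv6)
	fmt.Println("ICMPv6:", icmp.TypeCode) // e.g. "NeighborSolicitation"

	// The type-based routing above exposes NDP messages as their own layers.
	if ns := packet.Layer(layers.LayerTypeICMPv6NeighborSolicitation); ns != nil {
		fmt.Println("target:", ns.(*layers.ICMPv6NeighborSolicitation).TargetAddress)
	}
}
```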
diff --git a/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/google/gopacket/layers/icmp6msg.go
new file mode 100644
index 0000000..d9268db
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6msg.go
@@ -0,0 +1,578 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+// Based on RFC 4861
+
+// ICMPv6Opt indicates how to decode the data associated with each ICMPv6Option.
+type ICMPv6Opt uint8
+
+const (
+	_ ICMPv6Opt = iota
+
+	// ICMPv6OptSourceAddress contains the link-layer address of the sender of
+	// the packet. It is used in the Neighbor Solicitation, Router
+	// Solicitation, and Router Advertisement packets. Must be ignored for other
+	// Neighbor discovery messages.
+	ICMPv6OptSourceAddress
+
+	// ICMPv6OptTargetAddress contains the link-layer address of the target. It
+	// is used in Neighbor Advertisement and Redirect packets. Must be ignored
+	// for other Neighbor discovery messages.
+	ICMPv6OptTargetAddress
+
+	// ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes
+	// for Address Autoconfiguration. The Prefix Information option appears in
+	// Router Advertisement packets and MUST be silently ignored for other
+	// messages.
+	ICMPv6OptPrefixInfo
+
+	// ICMPv6OptRedirectedHeader is used in Redirect messages and contains all
+	// or part of the packet that is being redirected.
+	ICMPv6OptRedirectedHeader
+
+	// ICMPv6OptMTU is used in Router Advertisement messages to ensure that all
+	// nodes on a link use the same MTU value in those cases where the link MTU
+	// is not well known. This option MUST be silently ignored for other
+	// Neighbor Discovery messages.
+	ICMPv6OptMTU
+)
+
+// ICMPv6Echo represents the structure of a ping.
+type ICMPv6Echo struct {
+	BaseLayer
+	Identifier uint16
+	SeqNumber  uint16
+}
+
+// ICMPv6RouterSolicitation is sent by hosts to find routers.
+type ICMPv6RouterSolicitation struct {
+	BaseLayer
+	Options ICMPv6Options
+}
+
+// ICMPv6RouterAdvertisement is sent by routers in response to Solicitation.
+type ICMPv6RouterAdvertisement struct {
+	BaseLayer
+	HopLimit       uint8
+	Flags          uint8
+	RouterLifetime uint16
+	ReachableTime  uint32
+	RetransTimer   uint32
+	Options        ICMPv6Options
+}
+
+// ICMPv6NeighborSolicitation is sent to request the link-layer address of a
+// target node.
+type ICMPv6NeighborSolicitation struct {
+	BaseLayer
+	TargetAddress net.IP
+	Options       ICMPv6Options
+}
+
+// ICMPv6NeighborAdvertisement is sent by nodes in response to Solicitation.
+type ICMPv6NeighborAdvertisement struct {
+	BaseLayer
+	Flags         uint8
+	TargetAddress net.IP
+	Options       ICMPv6Options
+}
+
+// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node
+// on the path to a destination.
+type ICMPv6Redirect struct {
+	BaseLayer
+	TargetAddress      net.IP
+	DestinationAddress net.IP
+	Options            ICMPv6Options
+}
+
+// ICMPv6Option contains the type and data for a single option.
+type ICMPv6Option struct {
+	Type ICMPv6Opt
+	Data []byte
+}
+
+// ICMPv6Options is a slice of ICMPv6Option.
+type ICMPv6Options []ICMPv6Option
+
+func (i ICMPv6Opt) String() string {
+	switch i {
+	case ICMPv6OptSourceAddress:
+		return "SourceAddress"
+	case ICMPv6OptTargetAddress:
+		return "TargetAddress"
+	case ICMPv6OptPrefixInfo:
+		return "PrefixInfo"
+	case ICMPv6OptRedirectedHeader:
+		return "RedirectedHeader"
+	case ICMPv6OptMTU:
+		return "MTU"
+	default:
+		return fmt.Sprintf("Unknown(%d)", i)
+	}
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6Echo
+}
+
+// LayerType returns LayerTypeICMPv6Echo.
+func (i *ICMPv6Echo) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6Echo
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 Echo")
+	}
+	i.Identifier = binary.BigEndian.Uint16(data[0:2])
+	i.SeqNumber = binary.BigEndian.Uint16(data[2:4])
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint16(buf, i.Identifier)
+	binary.BigEndian.PutUint16(buf[2:], i.SeqNumber)
+	return nil
+}
+
+// LayerType returns LayerTypeICMPv6RouterSolicitation.
+func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6RouterSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// first 4 bytes are reserved followed by options
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 router solicitation")
+	}
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[4:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6RouterSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6RouterAdvertisement.
+func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6RouterAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 12 bytes for ICMPv6 router advertisement")
+	}
+
+	i.HopLimit = uint8(data[0])
+	// M, O bit followed by 6 reserved bits
+	i.Flags = uint8(data[1])
+	i.RouterLifetime = binary.BigEndian.Uint16(data[2:4])
+	i.ReachableTime = binary.BigEndian.Uint32(data[4:8])
+	i.RetransTimer = binary.BigEndian.Uint32(data[8:12])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[12:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(12)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(i.HopLimit)
+	buf[1] = byte(i.Flags)
+	binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime)
+	binary.BigEndian.PutUint32(buf[4:], i.ReachableTime)
+	binary.BigEndian.PutUint32(buf[8:], i.RetransTimer)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6RouterAdvertisement
+}
+
+// ManagedAddressConfig is true when addresses are available via DHCPv6. If
+// set, the OtherConfig flag is redundant.
+func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool {
+	return i.Flags&0x80 != 0
+}
+
+// OtherConfig is true when there is other configuration information available
+// via DHCPv6. For example, DNS-related information.
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool {
+	return i.Flags&0x40 != 0
+}
+
+// LayerType returns LayerTypeICMPv6NeighborSolicitation.
+func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6NeighborSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor solicitation")
+	}
+
+	i.TargetAddress = net.IP(data[4:20])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	copy(buf[4:], i.TargetAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6NeighborSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6NeighborAdvertisement.
+func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor advertisement")
+	}
+
+	i.Flags = uint8(data[0])
+	i.TargetAddress = net.IP(data[4:20])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(i.Flags)
+	copy(buf[1:], lotsOfZeros[:3])
+	copy(buf[4:], i.TargetAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// Router indicates whether the sender is a router or not.
+func (i *ICMPv6NeighborAdvertisement) Router() bool {
+	return i.Flags&0x80 != 0
+}
+
+// Solicited indicates whether the advertisement was solicited or not.
+func (i *ICMPv6NeighborAdvertisement) Solicited() bool {
+	return i.Flags&0x40 != 0
+}
+
+// Override indicates whether the advertisement should Override an existing
+// cache entry.
+func (i *ICMPv6NeighborAdvertisement) Override() bool {
+	return i.Flags&0x20 != 0
+}
+
+// LayerType returns LayerTypeICMPv6Redirect.
+func (i *ICMPv6Redirect) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6Redirect
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 36 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 36 bytes for ICMPv6 redirect")
+	}
+
+	i.TargetAddress = net.IP(data[4:20])
+	i.DestinationAddress = net.IP(data[20:36])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[36:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(36)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	copy(buf[4:], i.TargetAddress)
+	copy(buf[20:], i.DestinationAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6Redirect
+}
+
+func (i ICMPv6Option) String() string {
+	hd := hex.EncodeToString(i.Data)
+	if len(hd) > 0 {
+		hd = " 0x" + hd
+	}
+
+	switch i.Type {
+	case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress:
+		return fmt.Sprintf("ICMPv6Option(%s:%v)",
+			i.Type,
+			net.HardwareAddr(i.Data))
+	case ICMPv6OptPrefixInfo:
+		if len(i.Data) == 30 {
+			prefixLen := uint8(i.Data[0])
+			onLink := (i.Data[1]&0x80 != 0)
+			autonomous := (i.Data[1]&0x40 != 0)
+			validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second
+			preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second
+
+			prefix := net.IP(i.Data[14:])
+
+			return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)",
+				i.Type,
+				prefix, prefixLen,
+				onLink, autonomous,
+				validLifetime, preferredLifetime)
+		}
+	case ICMPv6OptRedirectedHeader:
+		// could invoke IP decoder on data... probably best not to
+		break
+	case ICMPv6OptMTU:
+		if len(i.Data) == 6 {
+			return fmt.Sprintf("ICMPv6Option(%s:%v)",
+				i.Type,
+				binary.BigEndian.Uint32(i.Data[2:]))
+		}
+
+	}
+	return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	for len(data) > 0 {
+		if len(data) < 2 {
+			df.SetTruncated()
+			return errors.New("ICMP layer less than 2 bytes for ICMPv6 message option")
+		}
+
+		// unit is 8 octets, convert to bytes
+		length := int(data[1]) * 8
+
+		if length == 0 {
+			df.SetTruncated()
+			return errors.New("ICMPv6 message option with length 0")
+		}
+
+		if len(data) < length {
+			df.SetTruncated()
+			return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length)
+		}
+
+		o := ICMPv6Option{
+			Type: ICMPv6Opt(data[0]),
+			Data: data[2:length],
+		}
+
+		// chop off option we just consumed
+		data = data[length:]
+
+		*i = append(*i, o)
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	for _, opt := range []ICMPv6Option(*i) {
+		length := len(opt.Data) + 2
+		buf, err := b.PrependBytes(length)
+		if err != nil {
+			return err
+		}
+
+		buf[0] = byte(opt.Type)
+		buf[1] = byte(length / 8)
+		copy(buf[2:], opt.Data)
+	}
+
+	return nil
+}
+
+func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6Echo{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6RouterSolicitation{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6RouterAdvertisement{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6NeighborSolicitation{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6NeighborAdvertisement{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6Redirect{}
+	return decodingLayerDecoder(i, data, p)
+}
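The NDP options above are length-prefixed in units of 8 octets, which is why SerializeTo writes len(Data)+2 bytes and stores length/8. A minimal sketch that serializes a neighbor solicitation body carrying a source link-layer address option (addresses are illustrative; a complete packet would also need the ICMPv6 header and, for the checksum, SetNetworkLayerForChecksum on the ICMPv6 layer):

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	mac, _ := net.ParseMAC("02:00:5e:10:00:01") // illustrative link-layer address

	ns := &layers.ICMPv6NeighborSolicitation{
		TargetAddress: net.ParseIP("fe80::1"),
		Options: layers.ICMPv6Options{
			// 2 bytes of type/length plus the 6-byte MAC = one 8-octet unit.
			{Type: layers.ICMPv6OptSourceAddress, Data: mac},
		},
	}

	buf := gopacket.NewSerializeBuffer()
	// This emits only the solicitation body (reserved word, target, options);
	// on the wire it sits behind an ICMPv6 header of type 135.
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, ns); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}
```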
diff --git a/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/google/gopacket/layers/igmp.go
new file mode 100644
index 0000000..d008415
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/igmp.go
@@ -0,0 +1,355 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+type IGMPType uint8
+
+const (
+	IGMPMembershipQuery    IGMPType = 0x11 // General or group specific query
+	IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report
+	IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report
+	IGMPLeaveGroup         IGMPType = 0x17 // Leave Group
+	IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report
+)
+
+// String conversions for IGMP message types
+func (i IGMPType) String() string {
+	switch i {
+	case IGMPMembershipQuery:
+		return "IGMP Membership Query"
+	case IGMPMembershipReportV1:
+		return "IGMPv1 Membership Report"
+	case IGMPMembershipReportV2:
+		return "IGMPv2 Membership Report"
+	case IGMPMembershipReportV3:
+		return "IGMPv3 Membership Report"
+	case IGMPLeaveGroup:
+		return "Leave Group"
+	default:
+		return ""
+	}
+}
+
+type IGMPv3GroupRecordType uint8
+
+const (
+	IGMPIsIn  IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x
+	IGMPIsEx  IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x
+	IGMPToIn  IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x
+	IGMPToEx  IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x
+	IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x
+	IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x
+)
+
+func (i IGMPv3GroupRecordType) String() string {
+	switch i {
+	case IGMPIsIn:
+		return "MODE_IS_INCLUDE"
+	case IGMPIsEx:
+		return "MODE_IS_EXCLUDE"
+	case IGMPToIn:
+		return "CHANGE_TO_INCLUDE_MODE"
+	case IGMPToEx:
+		return "CHANGE_TO_EXCLUDE_MODE"
+	case IGMPAllow:
+		return "ALLOW_NEW_SOURCES"
+	case IGMPBlock:
+		return "BLOCK_OLD_SOURCES"
+	default:
+		return ""
+	}
+}
+
+// IGMP represents an IGMPv3 message.
+type IGMP struct {
+	BaseLayer
+	Type                    IGMPType
+	MaxResponseTime         time.Duration
+	Checksum                uint16
+	GroupAddress            net.IP
+	SupressRouterProcessing bool
+	RobustnessValue         uint8
+	IntervalTime            time.Duration
+	SourceAddresses         []net.IP
+	NumberOfGroupRecords    uint16
+	NumberOfSources         uint16
+	GroupRecords            []IGMPv3GroupRecord
+	Version                 uint8 // IGMP protocol version
+}
+
+// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet.
+//
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |      Type     | Max Resp Time |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                         Group Address                         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type IGMPv1or2 struct {
+	BaseLayer
+	Type            IGMPType      // IGMP message type
+	MaxResponseTime time.Duration // meaningful only in Membership Query messages
+	Checksum        uint16        // 16-bit checksum of entire ip payload
+	GroupAddress    net.IP        // either 0 or an IP multicast address
+	Version         uint8
+}
+
+// decodeResponse dissects an IGMPv1 or IGMPv2 packet.
+func (i *IGMPv1or2) decodeResponse(data []byte) error {
+	if len(data) < 8 {
+		return errors.New("IGMP packet too small")
+	}
+
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.GroupAddress = net.IP(data[4:8])
+
+	return nil
+}
+
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Type = 0x22  |    Reserved   |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |           Reserved            |  Number of Group Records (M)  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [1]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [2]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [M]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Record Type  |  Aux Data Len |     Number of Sources (N)     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Multicast Address                       |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Source Address [1]                      |
+// +-                                                             -+
+// |                       Source Address [2]                      |
+// +-                                                             -+
+// |                       Source Address [N]                      |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                         Auxiliary Data                        .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message.
+type IGMPv3GroupRecord struct {
+	Type             IGMPv3GroupRecordType
+	AuxDataLen       uint8 // this should always be 0 as per IGMPv3 spec.
+	NumberOfSources  uint16
+	MulticastAddress net.IP
+	SourceAddresses  []net.IP
+	AuxData          uint32 // NOT USED
+}
+
+func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error {
+	if len(data) < 8 {
+		return errors.New("IGMPv3 Membership Report too small #1")
+	}
+
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8])
+
+	recordOffset := 8
+	for j := 0; j < int(i.NumberOfGroupRecords); j++ {
+		if len(data) < recordOffset+8 {
+			return errors.New("IGMPv3 Membership Report too small #2")
+		}
+
+		var gr IGMPv3GroupRecord
+		gr.Type = IGMPv3GroupRecordType(data[recordOffset])
+		gr.AuxDataLen = data[recordOffset+1]
+		gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4])
+		gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8])
+
+		if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 {
+			return errors.New("IGMPv3 Membership Report too small #3")
+		}
+
+		// append source address records.
+		for i := 0; i < int(gr.NumberOfSources); i++ {
+			sourceAddr := net.IP(data[recordOffset+8+i*4 : recordOffset+12+i*4])
+			gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr)
+		}
+
+		i.GroupRecords = append(i.GroupRecords, gr)
+		recordOffset += 8 + 4*int(gr.NumberOfSources)
+	}
+	return nil
+}
+
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Type = 0x11  | Max Resp Code |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                         Group Address                         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Resv  |S| QRV |     QQIC      |     Number of Sources (N)     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Source Address [1]                      |
+// +-                                                             -+
+// |                       Source Address [2]                      |
+// +-                              .                              -+
+// |                       Source Address [N]                      |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11
+func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error {
+	if len(data) < 12 {
+		return errors.New("IGMPv3 Membership Query too small #1")
+	}
+
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.SupressRouterProcessing = data[8]&0x8 != 0
+	i.GroupAddress = net.IP(data[4:8])
+	i.RobustnessValue = data[8] & 0x7
+	i.IntervalTime = igmpTimeDecode(data[9])
+	i.NumberOfSources = binary.BigEndian.Uint16(data[10:12])
+
+	if len(data) < 12+int(i.NumberOfSources)*4 {
+		return errors.New("IGMPv3 Membership Query too small #2")
+	}
+
+	for j := 0; j < int(i.NumberOfSources); j++ {
+		i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4]))
+	}
+
+	return nil
+}
+
+// igmpTimeDecode decodes the duration created by the given byte, using the
+// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1.
+func igmpTimeDecode(t uint8) time.Duration {
+	if t&0x80 == 0 {
+		return time.Millisecond * 100 * time.Duration(t)
+	}
+	// RFC 3376 section 4.1.1: the byte is 1 | exp (3 bits) | mant (4 bits).
+	exp := (t & 0x70) >> 4
+	mant := t & 0x0F
+	// Widen before shifting: the shifted value does not fit in a uint8.
+	return time.Millisecond * 100 * (time.Duration(mant|0x10) << (exp + 3))
+}
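For reference, RFC 3376 section 4.1.1 encodes codes of 128 and above as a small floating-point value, 1 | exp (3 bits) | mant (4 bits), in units of 100 ms. A standalone sketch of that expansion, independent of this package (the helper name rfc3376Time is illustrative only):

package main

import (
	"fmt"
	"time"
)

// rfc3376Time expands an IGMPv3 Max Resp Code into a duration,
// following RFC 3376 section 4.1.1 (units of 100 ms).
func rfc3376Time(code uint8) time.Duration {
	if code < 128 {
		return time.Duration(code) * 100 * time.Millisecond
	}
	exp := (code >> 4) & 0x07 // bits 1-3
	mant := code & 0x0F       // bits 4-7
	units := time.Duration(mant|0x10) << (exp + 3)
	return units * 100 * time.Millisecond
}

func main() {
	fmt.Println(rfc3376Time(100))  // 10s
	fmt.Println(rfc3376Time(0xFF)) // 31744 units, i.e. 52m54.4s
}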
+
+// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats.
+func (i *IGMP) LayerType() gopacket.LayerType      { return LayerTypeIGMP }
+func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP }
+
+func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		return errors.New("IGMP Packet too small")
+	}
+
+	i.Type = IGMPType(data[0])
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.GroupAddress = net.IP(data[4:8])
+
+	return nil
+}
+
+func (i *IGMPv1or2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+func (i *IGMPv1or2) CanDecode() gopacket.LayerClass {
+	return LayerTypeIGMP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 1 {
+		return errors.New("IGMP packet is too small")
+	}
+
+	// Header fields common to versions 1 through 3 of the IGMP specification.
+	i.Type = IGMPType(data[0])
+
+	switch i.Type {
+	case IGMPMembershipQuery:
+		return i.decodeIGMPv3MembershipQuery(data)
+	case IGMPMembershipReportV3:
+		return i.decodeIGMPv3MembershipReport(data)
+	default:
+		return errors.New("unsupported IGMP type")
+	}
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *IGMP) CanDecode() gopacket.LayerClass {
+	return LayerTypeIGMP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *IGMP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// decodeIGMP parses IGMP v1, v2 or v3 packets. The IGMP type is read from
+// byte 0; the logic then initializes the appropriate struct (IGMP or
+// IGMPv1or2) and passes it to decodingLayerDecoder.
+func decodeIGMP(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 1 {
+		return errors.New("IGMP packet is too small")
+	}
+
+	// byte 0 contains IGMP message type.
+	switch IGMPType(data[0]) {
+	case IGMPMembershipQuery:
+		// IGMPv3 Membership Query payload is >= 12
+		if len(data) >= 12 {
+			i := &IGMP{Version: 3}
+			return decodingLayerDecoder(i, data, p)
+		} else if len(data) == 8 {
+			i := &IGMPv1or2{}
+			if data[1] == 0x00 {
+				i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0
+			} else {
+				i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0
+			}
+
+			return decodingLayerDecoder(i, data, p)
+		}
+	case IGMPMembershipReportV3:
+		i := &IGMP{Version: 3}
+		return decodingLayerDecoder(i, data, p)
+	case IGMPMembershipReportV1:
+		i := &IGMPv1or2{Version: 1}
+		return decodingLayerDecoder(i, data, p)
+	case IGMPLeaveGroup, IGMPMembershipReportV2:
+		// Leave Group and Membership Report v2 are used by IGMPv2 only.
+		i := &IGMPv1or2{Version: 2}
+		return decodingLayerDecoder(i, data, p)
+	default:
+	}
+
+	return errors.New("Unable to determine IGMP type.")
+}
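A short usage sketch for the version heuristic above, feeding a raw IGMPv2 Membership Report straight into this layer; the checksum bytes are illustrative and are not verified by the decoder:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 8-byte IGMPv2 Membership Report (type 0x16) for group 224.0.0.251.
	raw := []byte{0x16, 0x00, 0x09, 0x04, 0xe0, 0x00, 0x00, 0xfb}

	pkt := gopacket.NewPacket(raw, layers.LayerTypeIGMP, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeIGMP); l != nil {
		if igmp, ok := l.(*layers.IGMPv1or2); ok {
			fmt.Println(igmp.Type, igmp.GroupAddress, "version", igmp.Version)
		}
	}
}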
diff --git a/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/google/gopacket/layers/ip4.go
new file mode 100644
index 0000000..2b3c0c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip4.go
@@ -0,0 +1,325 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+type IPv4Flag uint8
+
+const (
+	IPv4EvilBit       IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;)
+	IPv4DontFragment  IPv4Flag = 1 << 1
+	IPv4MoreFragments IPv4Flag = 1 << 0
+)
+
+func (f IPv4Flag) String() string {
+	var s []string
+	if f&IPv4EvilBit != 0 {
+		s = append(s, "Evil")
+	}
+	if f&IPv4DontFragment != 0 {
+		s = append(s, "DF")
+	}
+	if f&IPv4MoreFragments != 0 {
+		s = append(s, "MF")
+	}
+	return strings.Join(s, "|")
+}
+
+// IPv4 is the header of an IP packet.
+type IPv4 struct {
+	BaseLayer
+	Version    uint8
+	IHL        uint8
+	TOS        uint8
+	Length     uint16
+	Id         uint16
+	Flags      IPv4Flag
+	FragOffset uint16
+	TTL        uint8
+	Protocol   IPProtocol
+	Checksum   uint16
+	SrcIP      net.IP
+	DstIP      net.IP
+	Options    []IPv4Option
+	Padding    []byte
+}
+
+// LayerType returns LayerTypeIPv4
+func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 }
+func (i *IPv4) NetworkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP)
+}
+
+type IPv4Option struct {
+	OptionType   uint8
+	OptionLength uint8
+	OptionData   []byte
+}
+
+func (i IPv4Option) String() string {
+	return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData)
+}
+
+// getIPv4OptionSize returns the number of bytes the current IPv4 options
+// occupy, including the padding needed to reach a 32-bit boundary.
+func (ip *IPv4) getIPv4OptionSize() uint8 {
+	optionSize := uint8(0)
+	for _, opt := range ip.Options {
+		switch opt.OptionType {
+		case 0:
+			// this is the end of option lists
+			optionSize++
+		case 1:
+			// this is the padding
+			optionSize++
+		default:
+			optionSize += opt.OptionLength
+
+		}
+	}
+	// make sure the options are aligned to 32 bit boundary
+	if (optionSize % 4) != 0 {
+		optionSize += 4 - (optionSize % 4)
+	}
+	return optionSize
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	optionLength := ip.getIPv4OptionSize()
+	bytes, err := b.PrependBytes(20 + int(optionLength))
+	if err != nil {
+		return err
+	}
+	if opts.FixLengths {
+		ip.IHL = 5 + (optionLength / 4)
+		ip.Length = uint16(len(b.Bytes()))
+	}
+	bytes[0] = (ip.Version << 4) | ip.IHL
+	bytes[1] = ip.TOS
+	binary.BigEndian.PutUint16(bytes[2:], ip.Length)
+	binary.BigEndian.PutUint16(bytes[4:], ip.Id)
+	binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags())
+	bytes[8] = ip.TTL
+	bytes[9] = byte(ip.Protocol)
+	if err := ip.AddressTo4(); err != nil {
+		return err
+	}
+	copy(bytes[12:16], ip.SrcIP)
+	copy(bytes[16:20], ip.DstIP)
+
+	curLocation := 20
+	// Now, we will encode the options
+	for _, opt := range ip.Options {
+		switch opt.OptionType {
+		case 0:
+			// this is the end of option lists
+			bytes[curLocation] = 0
+			curLocation++
+		case 1:
+			// this is the padding
+			bytes[curLocation] = 1
+			curLocation++
+		default:
+			bytes[curLocation] = opt.OptionType
+			bytes[curLocation+1] = opt.OptionLength
+
+			// sanity checking to protect us from buffer overrun
+			if len(opt.OptionData) > int(opt.OptionLength-2) {
+				return errors.New("option length is smaller than length of option data")
+			}
+			copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData)
+			curLocation += int(opt.OptionLength)
+		}
+	}
+
+	if opts.ComputeChecksums {
+		ip.Checksum = checksum(bytes)
+	}
+	binary.BigEndian.PutUint16(bytes[10:], ip.Checksum)
+	return nil
+}
+
+func checksum(bytes []byte) uint16 {
+	// Clear checksum bytes
+	bytes[10] = 0
+	bytes[11] = 0
+
+	// Compute the one's-complement sum of the 16-bit words
+	var csum uint32
+	for i := 0; i < len(bytes); i += 2 {
+		csum += uint32(bytes[i]) << 8
+		csum += uint32(bytes[i+1])
+	}
+	// Fold the carries back in until the sum fits in 16 bits
+	for csum > 0xFFFF {
+		csum = (csum >> 16) + uint32(uint16(csum))
+	}
+	// Flip all the bits
+	return ^uint16(csum)
+}
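The header checksum is the one's complement of the one's-complement sum of the header's 16-bit words, computed with the checksum field zeroed. A standalone sketch of the same folding over a commonly cited sample header; with these bytes it should print 0xb861:

package main

import "fmt"

// ipChecksum mirrors the fold-and-complement logic above for an
// even-length header whose checksum field is already zeroed.
func ipChecksum(b []byte) uint16 {
	var sum uint32
	for i := 0; i < len(b); i += 2 {
		sum += uint32(b[i])<<8 | uint32(b[i+1])
	}
	for sum > 0xffff {
		sum = (sum >> 16) + (sum & 0xffff)
	}
	return ^uint16(sum)
}

func main() {
	hdr := []byte{
		0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x11, 0x00, 0x00, // TTL, protocol, checksum field zeroed
		0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x00, 0xc7,
	}
	fmt.Printf("%#04x\n", ipChecksum(hdr))
}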
+
+func (ip *IPv4) flagsfrags() (ff uint16) {
+	ff |= uint16(ip.Flags) << 13
+	ff |= ip.FragOffset
+	return
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data))
+	}
+	flagsfrags := binary.BigEndian.Uint16(data[6:8])
+
+	ip.Version = uint8(data[0]) >> 4
+	ip.IHL = uint8(data[0]) & 0x0F
+	ip.TOS = data[1]
+	ip.Length = binary.BigEndian.Uint16(data[2:4])
+	ip.Id = binary.BigEndian.Uint16(data[4:6])
+	ip.Flags = IPv4Flag(flagsfrags >> 13)
+	ip.FragOffset = flagsfrags & 0x1FFF
+	ip.TTL = data[8]
+	ip.Protocol = IPProtocol(data[9])
+	ip.Checksum = binary.BigEndian.Uint16(data[10:12])
+	ip.SrcIP = data[12:16]
+	ip.DstIP = data[16:20]
+	ip.Options = ip.Options[:0]
+	ip.Padding = nil
+	// Set up an initial guess for contents/payload... we'll reset these soon.
+	ip.BaseLayer = BaseLayer{Contents: data}
+
+	// This code is added for the following environment:
+	// * Windows 10 with the TSO option activated (tested on Hyper-V, RealTek ethernet driver).
+	if ip.Length == 0 {
+		// If TSO (TCP Segmentation Offload) is in use, the length field is zero.
+		// The actual packet length is the length of the data.
+		ip.Length = uint16(len(data))
+	}
+
+	if ip.Length < 20 {
+		return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length)
+	} else if ip.IHL < 5 {
+		return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL)
+	} else if int(ip.IHL*4) > int(ip.Length) {
+		return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length)
+	}
+	if cmp := len(data) - int(ip.Length); cmp > 0 {
+		data = data[:ip.Length]
+	} else if cmp < 0 {
+		df.SetTruncated()
+		if int(ip.IHL)*4 > len(data) {
+			return errors.New("Not all IP header bytes available")
+		}
+	}
+	ip.Contents = data[:ip.IHL*4]
+	ip.Payload = data[ip.IHL*4:]
+	// From here on, data contains the header options.
+	data = data[20 : ip.IHL*4]
+	// Pull out IP options
+	for len(data) > 0 {
+		if ip.Options == nil {
+			// Pre-allocate to avoid growing the slice too much.
+			ip.Options = make([]IPv4Option, 0, 4)
+		}
+		opt := IPv4Option{OptionType: data[0]}
+		switch opt.OptionType {
+		case 0: // End of options
+			opt.OptionLength = 1
+			ip.Options = append(ip.Options, opt)
+			ip.Padding = data[1:]
+			return nil
+		case 1: // 1 byte padding
+			opt.OptionLength = 1
+			data = data[1:]
+			ip.Options = append(ip.Options, opt)
+		default:
+			if len(data) < 2 {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data))
+			}
+			opt.OptionLength = data[1]
+			if len(data) < int(opt.OptionLength) {
+				df.SetTruncated()
+				return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength)
+			}
+			if opt.OptionLength <= 2 {
+				return fmt.Errorf("Invalid IP option type %v length %d. Must be greater than 2", opt.OptionType, opt.OptionLength)
+			}
+			opt.OptionData = data[2:opt.OptionLength]
+			data = data[opt.OptionLength:]
+			ip.Options = append(ip.Options, opt)
+		}
+	}
+	return nil
+}
+
+func (i *IPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeIPv4
+}
+
+func (i *IPv4) NextLayerType() gopacket.LayerType {
+	if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 {
+		return gopacket.LayerTypeFragment
+	}
+	return i.Protocol.LayerType()
+}
+
+func decodeIPv4(data []byte, p gopacket.PacketBuilder) error {
+	ip := &IPv4{}
+	err := ip.DecodeFromBytes(data, p)
+	p.AddLayer(ip)
+	p.SetNetworkLayer(ip)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(ip.NextLayerType())
+}
+
+func checkIPv4Address(addr net.IP) (net.IP, error) {
+	if c := addr.To4(); c != nil {
+		return c, nil
+	}
+	if len(addr) == net.IPv6len {
+		return nil, errors.New("address is IPv6")
+	}
+	return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len)
+}
+
+func (ip *IPv4) AddressTo4() error {
+	var src, dst net.IP
+
+	if addr, err := checkIPv4Address(ip.SrcIP); err != nil {
+		return fmt.Errorf("Invalid source IPv4 address (%s)", err)
+	} else {
+		src = addr
+	}
+	if addr, err := checkIPv4Address(ip.DstIP); err != nil {
+		return fmt.Errorf("Invalid destination IPv4 address (%s)", err)
+	} else {
+		dst = addr
+	}
+	ip.SrcIP = src
+	ip.DstIP = dst
+	return nil
+}
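A hedged end-to-end sketch of serializing this layer with gopacket's SerializeLayers, letting FixLengths derive IHL and Length and ComputeChecksums fill in the header checksum; the addresses and payload are arbitrary examples:

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	ip := &layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    net.ParseIP("192.168.0.1"),
		DstIP:    net.ParseIP("192.168.0.199"),
	}
	payload := gopacket.Payload([]byte("hello"))

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, payload); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}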
diff --git a/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/google/gopacket/layers/ip6.go
new file mode 100644
index 0000000..70e9c8d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip6.go
@@ -0,0 +1,707 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// IPv6HopByHopOptionJumbogram code as defined in RFC 2675
+	IPv6HopByHopOptionJumbogram = 0xC2
+)
+
+const (
+	ipv6MaxPayloadLength = 65535
+)
+
+// IPv6 is the layer for the IPv6 header.
+type IPv6 struct {
+	// http://www.networksorcery.com/enp/protocol/ipv6.htm
+	BaseLayer
+	Version      uint8
+	TrafficClass uint8
+	FlowLabel    uint32
+	Length       uint16
+	NextHeader   IPProtocol
+	HopLimit     uint8
+	SrcIP        net.IP
+	DstIP        net.IP
+	HopByHop     *IPv6HopByHop
+	// hbh will be pointed to by HopByHop if that layer exists.
+	hbh IPv6HopByHop
+}
+
+// LayerType returns LayerTypeIPv6
+func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
+
+// NetworkFlow returns this new Flow (EndpointIPv6, SrcIP, DstIP)
+func (ipv6 *IPv6) NetworkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP)
+}
+
+// Search for Jumbo Payload TLV in IPv6HopByHop and return (length, true) if found
+func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) {
+	var tlv *IPv6HopByHopOption
+
+	for _, t := range hopopts.Options {
+		if t.OptionType == IPv6HopByHopOptionJumbogram {
+			tlv = t
+			break
+		}
+	}
+	if tlv == nil {
+		// Not found
+		return 0, false, nil
+	}
+	if len(tlv.OptionData) != 4 {
+		return 0, false, errors.New("Jumbo length TLV data must have length 4")
+	}
+	l := binary.BigEndian.Uint32(tlv.OptionData)
+	if l <= ipv6MaxPayloadLength {
+		return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1)
+	}
+	// Found
+	return l, true, nil
+}
+
+// Adds zero-valued Jumbo TLV to IPv6 header if it does not exist
+// (if necessary add hop-by-hop header)
+func addIPv6JumboOption(ip6 *IPv6) {
+	var tlv *IPv6HopByHopOption
+
+	if ip6.HopByHop == nil {
+		// Add IPv6 HopByHop
+		ip6.HopByHop = &IPv6HopByHop{}
+		ip6.HopByHop.NextHeader = ip6.NextHeader
+		ip6.HopByHop.HeaderLength = 0
+		ip6.NextHeader = IPProtocolIPv6HopByHop
+	}
+	for _, t := range ip6.HopByHop.Options {
+		if t.OptionType == IPv6HopByHopOptionJumbogram {
+			tlv = t
+			break
+		}
+	}
+	if tlv == nil {
+		// Add Jumbo TLV
+		tlv = &IPv6HopByHopOption{}
+		ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv)
+	}
+	tlv.SetJumboLength(0)
+}
+
+// Set jumbo length in serialized IPv6 payload (starting with HopByHop header)
+func setIPv6PayloadJumboLength(hbh []byte) error {
+	pLen := len(hbh)
+	if pLen < 8 {
+		//HopByHop is minimum 8 bytes
+		return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen)
+	}
+	hbhLen := int((hbh[1] + 1) * 8)
+	if hbhLen > pLen {
+		return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d)", hbhLen, pLen)
+	}
+	offset := 2 //start with options
+	for offset < hbhLen {
+		opt := hbh[offset]
+		if opt == 0 {
+			//Pad1
+			offset++
+			continue
+		}
+		optLen := int(hbh[offset+1])
+		if opt == IPv6HopByHopOptionJumbogram {
+			if optLen == 4 {
+				binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen))
+				return nil
+			}
+			return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen)
+		}
+		offset += 2 + optLen
+	}
+	return errors.New("Jumbo TLV not found")
+}
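For orientation, RFC 2675 carries the Jumbo Payload option inside the Hop-by-Hop header: option type 0xC2, option data length 4, then the 32-bit payload length, while the IPv6 Payload Length field itself stays zero. A byte-level sketch of such a header; the next-header value 59 (No Next Header) and the 80000-byte length are illustrative assumptions:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hop-by-hop header: NextHeader, HdrExtLen (8-octet units minus one),
	// then options. The option's type byte sits at offset 2, which already
	// satisfies the 4n+2 alignment RFC 2675 asks for, so no padding is needed.
	hbh := make([]byte, 8)
	hbh[0] = 59                                // No Next Header (illustrative)
	hbh[1] = 0                                 // (0+1)*8 = 8 octets total
	hbh[2] = 0xC2                              // Jumbo Payload option type
	hbh[3] = 4                                 // option data length
	binary.BigEndian.PutUint32(hbh[4:], 80000) // jumbogram payload length

	fmt.Printf("% x\n", hbh)
}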
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var jumbo bool
+	var err error
+
+	payload := b.Bytes()
+	pLen := len(payload)
+	if pLen > ipv6MaxPayloadLength {
+		jumbo = true
+		if opts.FixLengths {
+			// We need to set the length later because the hop-by-hop header may
+			// not exist or else need padding, so pLen may yet change
+			addIPv6JumboOption(ipv6)
+		} else if ipv6.HopByHop == nil {
+			return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen)
+		} else {
+			_, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+			if err != nil {
+				return err
+			}
+			if !ok {
+				return errors.New("Missing jumbo length hop-by-hop option")
+			}
+		}
+	}
+
+	hbhAlreadySerialized := false
+	if ipv6.HopByHop != nil {
+		for _, l := range b.Layers() {
+			if l == LayerTypeIPv6HopByHop {
+				hbhAlreadySerialized = true
+				break
+			}
+		}
+	}
+	if ipv6.HopByHop != nil && !hbhAlreadySerialized {
+		if ipv6.NextHeader != IPProtocolIPv6HopByHop {
+			// Just fix it instead of throwing an error
+			ipv6.NextHeader = IPProtocolIPv6HopByHop
+		}
+		err = ipv6.HopByHop.SerializeTo(b, opts)
+		if err != nil {
+			return err
+		}
+		payload = b.Bytes()
+		pLen = len(payload)
+		if opts.FixLengths && jumbo {
+			err := setIPv6PayloadJumboLength(payload)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if !jumbo && pLen > ipv6MaxPayloadLength {
+		return errors.New("Cannot fit payload into IPv6 header")
+	}
+	bytes, err := b.PrependBytes(40)
+	if err != nil {
+		return err
+	}
+	bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4)
+	bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16)
+	binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel))
+	if opts.FixLengths {
+		if jumbo {
+			ipv6.Length = 0
+		} else {
+			ipv6.Length = uint16(pLen)
+		}
+	}
+	binary.BigEndian.PutUint16(bytes[4:], ipv6.Length)
+	bytes[6] = byte(ipv6.NextHeader)
+	bytes[7] = byte(ipv6.HopLimit)
+	if err := ipv6.AddressTo16(); err != nil {
+		return err
+	}
+	copy(bytes[8:], ipv6.SrcIP)
+	copy(bytes[24:], ipv6.DstIP)
+	return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 40 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data))
+	}
+	ipv6.Version = uint8(data[0]) >> 4
+	ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
+	ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
+	ipv6.Length = binary.BigEndian.Uint16(data[4:6])
+	ipv6.NextHeader = IPProtocol(data[6])
+	ipv6.HopLimit = data[7]
+	ipv6.SrcIP = data[8:24]
+	ipv6.DstIP = data[24:40]
+	ipv6.HopByHop = nil
+	ipv6.BaseLayer = BaseLayer{data[:40], data[40:]}
+
+	// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
+	// options are crucial for understanding what's actually happening per packet.
+	if ipv6.NextHeader == IPProtocolIPv6HopByHop {
+		err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df)
+		if err != nil {
+			return err
+		}
+		ipv6.HopByHop = &ipv6.hbh
+		pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+		if err != nil {
+			return err
+		}
+		if jumbo && ipv6.Length == 0 {
+			pEnd := int(pEnd)
+			if pEnd > len(ipv6.Payload) {
+				df.SetTruncated()
+				pEnd = len(ipv6.Payload)
+			}
+			ipv6.Payload = ipv6.Payload[:pEnd]
+			return nil
+		} else if jumbo && ipv6.Length != 0 {
+			return errors.New("IPv6 has jumbo length and IPv6 length is not 0")
+		} else if !jumbo && ipv6.Length == 0 {
+			return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option")
+		} else {
+			ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:]
+		}
+	}
+
+	if ipv6.Length == 0 {
+		return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader)
+	}
+
+	pEnd := int(ipv6.Length)
+	if pEnd > len(ipv6.Payload) {
+		df.SetTruncated()
+		pEnd = len(ipv6.Payload)
+	}
+	ipv6.Payload = ipv6.Payload[:pEnd]
+
+	return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeIPv6
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) NextLayerType() gopacket.LayerType {
+	if ipv6.HopByHop != nil {
+		return ipv6.HopByHop.NextHeader.LayerType()
+	}
+	return ipv6.NextHeader.LayerType()
+}
+
+func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
+	ip6 := &IPv6{}
+	err := ip6.DecodeFromBytes(data, p)
+	p.AddLayer(ip6)
+	p.SetNetworkLayer(ip6)
+	if ip6.HopByHop != nil {
+		p.AddLayer(ip6.HopByHop)
+	}
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(ip6.NextLayerType())
+}
+
+type ipv6HeaderTLVOption struct {
+	OptionType, OptionLength uint8
+	ActualLength             int
+	OptionData               []byte
+	OptionAlignment          [2]uint8 // Xn+Y = [2]uint8{X, Y}
+}
+
+func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int {
+	if fixLengths {
+		h.OptionLength = uint8(len(h.OptionData))
+	}
+	length := int(h.OptionLength) + 2
+	if !dryrun {
+		data[0] = h.OptionType
+		data[1] = h.OptionLength
+		copy(data[2:], h.OptionData)
+	}
+	return length
+}
+
+func decodeIPv6HeaderTLVOption(data []byte) (h *ipv6HeaderTLVOption) {
+	h = &ipv6HeaderTLVOption{}
+	if data[0] == 0 {
+		h.ActualLength = 1
+		return
+	}
+	h.OptionType = data[0]
+	h.OptionLength = data[1]
+	h.ActualLength = int(h.OptionLength) + 2
+	h.OptionData = data[2:h.ActualLength]
+	return
+}
+
+func serializeTLVOptionPadding(data []byte, padLength int) {
+	if padLength <= 0 {
+		return
+	}
+	if padLength == 1 {
+		data[0] = 0x0
+		return
+	}
+	tlvLength := uint8(padLength) - 2
+	data[0] = 0x1
+	data[1] = tlvLength
+	if tlvLength != 0 {
+		for k := range data[2:] {
+			data[k+2] = 0x0
+		}
+	}
+	return
+}
+
+// If buf is nil, do a serialization dry run.
+func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int {
+	var l int
+
+	dryrun := buf == nil
+	length := 2
+	for _, opt := range options {
+		if fixLengths {
+			x := int(opt.OptionAlignment[0])
+			y := int(opt.OptionAlignment[1])
+			if x != 0 {
+				n := length / x
+				offset := x*n + y
+				if offset < length {
+					offset += x
+				}
+				if length != offset {
+					pad := offset - length
+					if !dryrun {
+						serializeTLVOptionPadding(buf[length-2:], pad)
+					}
+					length += pad
+				}
+			}
+		}
+		if dryrun {
+			l = opt.serializeTo(nil, fixLengths, true)
+		} else {
+			l = opt.serializeTo(buf[length-2:], fixLengths, false)
+		}
+		length += l
+	}
+	if fixLengths {
+		// Pad the header out to a multiple of 8 octets.
+		pad := (8 - length%8) % 8
+		if pad != 0 {
+			if !dryrun {
+				serializeTLVOptionPadding(buf[length-2:], pad)
+			}
+			length += pad
+		}
+	}
+	return length - 2
+}
+
+type ipv6ExtensionBase struct {
+	BaseLayer
+	NextHeader   IPProtocol
+	HeaderLength uint8
+	ActualLength int
+}
+
+func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data))
+	}
+	i.NextHeader = IPProtocol(data[0])
+	i.HeaderLength = data[1]
+	i.ActualLength = int(i.HeaderLength)*8 + 8
+	if len(data) < i.ActualLength {
+		return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength)
+	}
+	i.Contents = data[:i.ActualLength]
+	i.Payload = data[i.ActualLength:]
+	return
+}
+
+// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
+// extensions.  You can use it with a DecodingLayerParser to handle IPv6 stacks
+// which may or may not have extensions.
+type IPv6ExtensionSkipper struct {
+	NextHeader IPProtocol
+	BaseLayer
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	extension, err := decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
+	i.NextHeader = extension.NextHeader
+	return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
+	return LayerClassIPv6Extension
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
+	return i.NextHeader.LayerType()
+}
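As the comment above suggests, the skipper slots into a DecodingLayerParser so the same parser handles IPv6 packets with or without extension headers. A minimal sketch assuming an Ethernet/IPv6/TCP capture; packetData stands in for bytes obtained elsewhere (for example from pcap):

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	var (
		eth     layers.Ethernet
		ip6     layers.IPv6
		skipper layers.IPv6ExtensionSkipper
		tcp     layers.TCP
		payload gopacket.Payload
	)
	parser := gopacket.NewDecodingLayerParser(
		layers.LayerTypeEthernet, &eth, &ip6, &skipper, &tcp, &payload)

	var packetData []byte // filled in from a capture source
	decoded := []gopacket.LayerType{}
	if err := parser.DecodeLayers(packetData, &decoded); err != nil {
		fmt.Println("decode error:", err)
	}
	for _, t := range decoded {
		fmt.Println("decoded", t)
	}
}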
+
+// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
+type IPv6HopByHopOption ipv6HeaderTLVOption
+
+// IPv6HopByHop is the IPv6 hop-by-hop extension.
+type IPv6HopByHop struct {
+	ipv6ExtensionBase
+	Options []*IPv6HopByHopOption
+}
+
+// LayerType returns LayerTypeIPv6HopByHop.
+func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
+
+// SerializeTo implementation according to gopacket.SerializableLayer
+func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var bytes []byte
+	var err error
+
+	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+	for _, v := range i.Options {
+		o = append(o, (*ipv6HeaderTLVOption)(v))
+	}
+
+	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+	bytes, err = b.PrependBytes(l)
+	if err != nil {
+		return err
+	}
+	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+	length := len(bytes) + 2
+	if length%8 != 0 {
+		return errors.New("IPv6HopByHop actual length must be multiple of 8")
+	}
+	bytes, err = b.PrependBytes(2)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(i.NextHeader)
+	if opts.FixLengths {
+		i.HeaderLength = uint8((length / 8) - 1)
+	}
+	bytes[1] = uint8(i.HeaderLength)
+	return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var err error
+	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	offset := 2
+	for offset < i.ActualLength {
+		opt := decodeIPv6HeaderTLVOption(data[offset:])
+		i.Options = append(i.Options, (*IPv6HopByHopOption)(opt))
+		offset += opt.ActualLength
+	}
+	return nil
+}
+
+func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPv6HopByHop{}
+	err := i.DecodeFromBytes(data, p)
+	p.AddLayer(i)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(i.NextHeader)
+}
+
+// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length
+func (o *IPv6HopByHopOption) SetJumboLength(len uint32) {
+	o.OptionType = IPv6HopByHopOptionJumbogram
+	o.OptionLength = 4
+	o.ActualLength = 6
+	if o.OptionData == nil {
+		o.OptionData = make([]byte, 4)
+	}
+	binary.BigEndian.PutUint32(o.OptionData, len)
+	o.OptionAlignment = [2]uint8{4, 2}
+}
+
+// IPv6Routing is the IPv6 routing extension.
+type IPv6Routing struct {
+	ipv6ExtensionBase
+	RoutingType  uint8
+	SegmentsLeft uint8
+	// Reserved is the second set of 4 bytes in the extension; RFC 2460
+	// requires it to be zero.
+	Reserved []byte
+	// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
+	// set only if RoutingType == 0.
+	SourceRoutingIPs []net.IP
+}
+
+// LayerType returns LayerTypeIPv6Routing.
+func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
+
+func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
+	base, err := decodeIPv6ExtensionBase(data, p)
+	if err != nil {
+		return err
+	}
+	i := &IPv6Routing{
+		ipv6ExtensionBase: base,
+		RoutingType:       data[2],
+		SegmentsLeft:      data[3],
+		Reserved:          data[4:8],
+	}
+	switch i.RoutingType {
+	case 0: // Source routing
+		if (i.ActualLength-8)%16 != 0 {
+			return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength)
+		}
+		for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
+			i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
+		}
+	default:
+		return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType)
+	}
+	p.AddLayer(i)
+	return p.NextDecoder(i.NextHeader)
+}
+
+// IPv6Fragment is the IPv6 fragment header, used for packet
+// fragmentation/defragmentation.
+type IPv6Fragment struct {
+	BaseLayer
+	NextHeader IPProtocol
+	// Reserved1 is bits [8-16), from least to most significant, 0-indexed
+	Reserved1      uint8
+	FragmentOffset uint16
+	// Reserved2 is bits [29-31), from least to most significant, 0-indexed
+	Reserved2      uint8
+	MoreFragments  bool
+	Identification uint32
+}
+
+// LayerType returns LayerTypeIPv6Fragment.
+func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
+
+func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		p.SetTruncated()
+		return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data))
+	}
+	i := &IPv6Fragment{
+		BaseLayer:      BaseLayer{data[:8], data[8:]},
+		NextHeader:     IPProtocol(data[0]),
+		Reserved1:      data[1],
+		FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
+		Reserved2:      data[3] & 0x6 >> 1,
+		MoreFragments:  data[3]&0x1 != 0,
+		Identification: binary.BigEndian.Uint32(data[4:8]),
+	}
+	p.AddLayer(i)
+	return p.NextDecoder(gopacket.DecodeFragment)
+}
+
+// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
+type IPv6DestinationOption ipv6HeaderTLVOption
+
+// IPv6Destination is the IPv6 destination options header.
+type IPv6Destination struct {
+	ipv6ExtensionBase
+	Options []*IPv6DestinationOption
+}
+
+// LayerType returns LayerTypeIPv6Destination.
+func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var err error
+	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	offset := 2
+	for offset < i.ActualLength {
+		opt := decodeIPv6HeaderTLVOption(data[offset:])
+		i.Options = append(i.Options, (*IPv6DestinationOption)(opt))
+		offset += opt.ActualLength
+	}
+	return nil
+}
+
+func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPv6Destination{}
+	err := i.DecodeFromBytes(data, p)
+	p.AddLayer(i)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(i.NextHeader)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var bytes []byte
+	var err error
+
+	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+	for _, v := range i.Options {
+		o = append(o, (*ipv6HeaderTLVOption)(v))
+	}
+
+	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+	bytes, err = b.PrependBytes(l)
+	if err != nil {
+		return err
+	}
+	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+	length := len(bytes) + 2
+	if length%8 != 0 {
+		return errors.New("IPv6Destination actual length must be multiple of 8")
+	}
+	bytes, err = b.PrependBytes(2)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(i.NextHeader)
+	if opts.FixLengths {
+		i.HeaderLength = uint8((length / 8) - 1)
+	}
+	bytes[1] = uint8(i.HeaderLength)
+	return nil
+}
+
+func checkIPv6Address(addr net.IP) error {
+	if len(addr) == net.IPv6len {
+		return nil
+	}
+	if len(addr) == net.IPv4len {
+		return errors.New("address is IPv4")
+	}
+	return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len)
+}
+
+// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 16 byte addresses)
+func (ipv6 *IPv6) AddressTo16() error {
+	if err := checkIPv6Address(ipv6.SrcIP); err != nil {
+		return fmt.Errorf("Invalid source IPv6 address (%s)", err)
+	}
+	if err := checkIPv6Address(ipv6.DstIP); err != nil {
+		return fmt.Errorf("Invalid destination IPv6 address (%s)", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/google/gopacket/layers/ipsec.go
new file mode 100644
index 0000000..19163fa
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ipsec.go
@@ -0,0 +1,68 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// IPSecAH is the authentication header for IPv4/6 defined in
+// http://tools.ietf.org/html/rfc2402
+type IPSecAH struct {
+	// While the auth header can be used for both IPv4 and v6, its format is that of
+	// an IPv6 extension (NextHeader, PayloadLength, etc...), so we use ipv6ExtensionBase
+	// to build it.
+	ipv6ExtensionBase
+	Reserved           uint16
+	SPI, Seq           uint32
+	AuthenticationData []byte
+}
+
+// LayerType returns LayerTypeIPSecAH.
+func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH }
+
+func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPSecAH{
+		ipv6ExtensionBase: ipv6ExtensionBase{
+			NextHeader:   IPProtocol(data[0]),
+			HeaderLength: data[1],
+		},
+		Reserved: binary.BigEndian.Uint16(data[2:4]),
+		SPI:      binary.BigEndian.Uint32(data[4:8]),
+		Seq:      binary.BigEndian.Uint32(data[8:12]),
+	}
+	i.ActualLength = (int(i.HeaderLength) + 2) * 4
+	i.AuthenticationData = data[12:i.ActualLength]
+	i.Contents = data[:i.ActualLength]
+	i.Payload = data[i.ActualLength:]
+	p.AddLayer(i)
+	return p.NextDecoder(i.NextHeader)
+}
+
+// IPSecESP is the encapsulating security payload defined in
+// http://tools.ietf.org/html/rfc2406
+type IPSecESP struct {
+	BaseLayer
+	SPI, Seq uint32
+	// Encrypted contains the encrypted set of bytes sent in an ESP
+	Encrypted []byte
+}
+
+// LayerType returns LayerTypeIPSecESP.
+func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP }
+
+func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPSecESP{
+		BaseLayer: BaseLayer{data, nil},
+		SPI:       binary.BigEndian.Uint32(data[:4]),
+		Seq:       binary.BigEndian.Uint32(data[4:8]),
+		Encrypted: data[8:],
+	}
+	p.AddLayer(i)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/google/gopacket/layers/layertypes.go
new file mode 100644
index 0000000..56fdb5a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/layertypes.go
@@ -0,0 +1,218 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+var (
+	LayerTypeARP                          = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)})
+	LayerTypeCiscoDiscovery               = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)})
+	LayerTypeEthernetCTP                  = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)})
+	LayerTypeEthernetCTPForwardData       = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil})
+	LayerTypeEthernetCTPReply             = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil})
+	LayerTypeDot1Q                        = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)})
+	LayerTypeEtherIP                      = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)})
+	LayerTypeEthernet                     = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)})
+	LayerTypeGRE                          = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)})
+	LayerTypeICMPv4                       = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)})
+	LayerTypeIPv4                         = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)})
+	LayerTypeIPv6                         = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)})
+	LayerTypeLLC                          = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)})
+	LayerTypeSNAP                         = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)})
+	LayerTypeMPLS                         = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)})
+	LayerTypePPP                          = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)})
+	LayerTypePPPoE                        = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)})
+	LayerTypeRUDP                         = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)})
+	LayerTypeSCTP                         = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)})
+	LayerTypeSCTPUnknownChunkType         = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil})
+	LayerTypeSCTPData                     = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil})
+	LayerTypeSCTPInit                     = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil})
+	LayerTypeSCTPSack                     = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil})
+	LayerTypeSCTPHeartbeat                = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil})
+	LayerTypeSCTPError                    = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil})
+	LayerTypeSCTPShutdown                 = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil})
+	LayerTypeSCTPShutdownAck              = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil})
+	LayerTypeSCTPCookieEcho               = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil})
+	LayerTypeSCTPEmptyLayer               = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil})
+	LayerTypeSCTPInitAck                  = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil})
+	LayerTypeSCTPHeartbeatAck             = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil})
+	LayerTypeSCTPAbort                    = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil})
+	LayerTypeSCTPShutdownComplete         = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil})
+	LayerTypeSCTPCookieAck                = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil})
+	LayerTypeTCP                          = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)})
+	LayerTypeUDP                          = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)})
+	LayerTypeIPv6HopByHop                 = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)})
+	LayerTypeIPv6Routing                  = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)})
+	LayerTypeIPv6Fragment                 = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)})
+	LayerTypeIPv6Destination              = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)})
+	LayerTypeIPSecAH                      = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)})
+	LayerTypeIPSecESP                     = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)})
+	LayerTypeUDPLite                      = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)})
+	LayerTypeFDDI                         = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)})
+	LayerTypeLoopback                     = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)})
+	LayerTypeEAP                          = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)})
+	LayerTypeEAPOL                        = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)})
+	LayerTypeICMPv6                       = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)})
+	LayerTypeLinkLayerDiscovery           = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)})
+	LayerTypeCiscoDiscoveryInfo           = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)})
+	LayerTypeLinkLayerDiscoveryInfo       = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil})
+	LayerTypeNortelDiscovery              = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)})
+	LayerTypeIGMP                         = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)})
+	LayerTypePFLog                        = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)})
+	LayerTypeRadioTap                     = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)})
+	LayerTypeDot11                        = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)})
+	LayerTypeDot11Ctrl                    = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)})
+	LayerTypeDot11Data                    = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)})
+	LayerTypeDot11DataCFAck               = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+	LayerTypeDot11DataCFPoll              = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+	LayerTypeDot11DataCFAckPoll           = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+	LayerTypeDot11DataNull                = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)})
+	LayerTypeDot11DataCFAckNoData         = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+	LayerTypeDot11DataCFPollNoData        = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+	LayerTypeDot11DataCFAckPollNoData     = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+	LayerTypeDot11DataQOSData             = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)})
+	LayerTypeDot11DataQOSDataCFAck        = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)})
+	LayerTypeDot11DataQOSDataCFPoll       = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)})
+	LayerTypeDot11DataQOSDataCFAckPoll    = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)})
+	LayerTypeDot11DataQOSNull             = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)})
+	LayerTypeDot11DataQOSCFPollNoData     = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)})
+	LayerTypeDot11DataQOSCFAckPollNoData  = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)})
+	LayerTypeDot11InformationElement      = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)})
+	LayerTypeDot11CtrlCTS                 = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)})
+	LayerTypeDot11CtrlRTS                 = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)})
+	LayerTypeDot11CtrlBlockAckReq         = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)})
+	LayerTypeDot11CtrlBlockAck            = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)})
+	LayerTypeDot11CtrlPowersavePoll       = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)})
+	LayerTypeDot11CtrlAck                 = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)})
+	LayerTypeDot11CtrlCFEnd               = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)})
+	LayerTypeDot11CtrlCFEndAck            = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)})
+	LayerTypeDot11MgmtAssociationReq      = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)})
+	LayerTypeDot11MgmtAssociationResp     = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)})
+	LayerTypeDot11MgmtReassociationReq    = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)})
+	LayerTypeDot11MgmtReassociationResp   = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)})
+	LayerTypeDot11MgmtProbeReq            = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)})
+	LayerTypeDot11MgmtProbeResp           = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)})
+	LayerTypeDot11MgmtMeasurementPilot    = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)})
+	LayerTypeDot11MgmtBeacon              = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)})
+	LayerTypeDot11MgmtATIM                = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)})
+	LayerTypeDot11MgmtDisassociation      = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)})
+	LayerTypeDot11MgmtAuthentication      = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)})
+	LayerTypeDot11MgmtDeauthentication    = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)})
+	LayerTypeDot11MgmtAction              = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)})
+	LayerTypeDot11MgmtActionNoAck         = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)})
+	LayerTypeDot11MgmtArubaWLAN           = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)})
+	LayerTypeDot11WEP                     = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)})
+	LayerTypeDNS                          = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)})
+	LayerTypeUSB                          = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)})
+	LayerTypeUSBRequestBlockSetup         = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)})
+	LayerTypeUSBControl                   = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)})
+	LayerTypeUSBInterrupt                 = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)})
+	LayerTypeUSBBulk                      = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)})
+	LayerTypeLinuxSLL                     = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)})
+	LayerTypeSFlow                        = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)})
+	LayerTypePrismHeader                  = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)})
+	LayerTypeVXLAN                        = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)})
+	LayerTypeNTP                          = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)})
+	LayerTypeDHCPv4                       = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)})
+	LayerTypeVRRP                         = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)})
+	LayerTypeGeneve                       = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)})
+	LayerTypeSTP                          = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)})
+	LayerTypeBFD                          = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)})
+	LayerTypeOSPF                         = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)})
+	LayerTypeICMPv6RouterSolicitation     = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)})
+	LayerTypeICMPv6RouterAdvertisement    = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)})
+	LayerTypeICMPv6NeighborSolicitation   = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)})
+	LayerTypeICMPv6NeighborAdvertisement  = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)})
+	LayerTypeICMPv6Redirect               = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)})
+	LayerTypeGTPv1U                       = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)})
+	LayerTypeEAPOLKey                     = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)})
+	LayerTypeLCM                          = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)})
+	LayerTypeICMPv6Echo                   = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)})
+	LayerTypeSIP                          = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)})
+	LayerTypeDHCPv6                       = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)})
+	LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)})
+	LayerTypeMLDv1MulticastListenerDone   = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)})
+	LayerTypeMLDv1MulticastListenerQuery  = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)})
+	LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)})
+	LayerTypeMLDv2MulticastListenerQuery  = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)})
+	LayerTypeTLS                          = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)})
+	LayerTypeModbusTCP                    = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)})
+)
+
+var (
+	// LayerClassIPNetwork contains TCP/IP network layer types.
+	LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPv4,
+		LayerTypeIPv6,
+	})
+	// LayerClassIPTransport contains TCP/IP transport layer types.
+	LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeTCP,
+		LayerTypeUDP,
+		LayerTypeSCTP,
+	})
+	// LayerClassIPControl contains TCP/IP control protocols.
+	LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeICMPv4,
+		LayerTypeICMPv6,
+	})
+	// LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP
+	// layer).
+	LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeSCTPUnknownChunkType,
+		LayerTypeSCTPData,
+		LayerTypeSCTPInit,
+		LayerTypeSCTPSack,
+		LayerTypeSCTPHeartbeat,
+		LayerTypeSCTPError,
+		LayerTypeSCTPShutdown,
+		LayerTypeSCTPShutdownAck,
+		LayerTypeSCTPCookieEcho,
+		LayerTypeSCTPEmptyLayer,
+		LayerTypeSCTPInitAck,
+		LayerTypeSCTPHeartbeatAck,
+		LayerTypeSCTPAbort,
+		LayerTypeSCTPShutdownComplete,
+		LayerTypeSCTPCookieAck,
+	})
+	// LayerClassIPv6Extension contains IPv6 extension headers.
+	LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPv6HopByHop,
+		LayerTypeIPv6Routing,
+		LayerTypeIPv6Fragment,
+		LayerTypeIPv6Destination,
+	})
+	// LayerClassIPSec contains the IPsec encapsulation headers.
+	LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPSecAH,
+		LayerTypeIPSecESP,
+	})
+	// LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol
+	// messages.
+	LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeICMPv6RouterSolicitation,
+		LayerTypeICMPv6RouterAdvertisement,
+		LayerTypeICMPv6NeighborSolicitation,
+		LayerTypeICMPv6NeighborAdvertisement,
+		LayerTypeICMPv6Redirect,
+	})
+	// LayerClassMLDv1 contains the MLDv1 multicast listener discovery message types.
+	LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeMLDv1MulticastListenerQuery,
+		LayerTypeMLDv1MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerDone,
+	})
+	// LayerClassMLDv2 contains the MLDv2 multicast listener discovery message
+	// types, together with the MLDv1 types they extend.
+	LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeMLDv1MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerDone,
+		LayerTypeMLDv2MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerQuery,
+		LayerTypeMLDv2MulticastListenerQuery,
+	})
+)
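The layer classes above group related LayerTypes so callers can test membership without enumerating every type. A minimal sketch of consulting one of these classes while walking a decoded packet; the assumption that the capture starts with an Ethernet header is illustrative, not part of this change:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// classifyTransport reports which TCP/IP transport layer, if any, a decoded
// packet carries, using the LayerClassIPTransport grouping defined above.
func classifyTransport(data []byte) {
	pkt := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.Default)
	for _, l := range pkt.Layers() {
		if layers.LayerClassIPTransport.Contains(l.LayerType()) {
			fmt.Println("transport layer:", l.LayerType())
			return
		}
	}
	fmt.Println("no TCP/UDP/SCTP layer present")
}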
diff --git a/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/google/gopacket/layers/lcm.go
new file mode 100644
index 0000000..5fe9fa5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lcm.go
@@ -0,0 +1,213 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// LCMShortHeaderMagic is the LCM small message header magic number
+	LCMShortHeaderMagic uint32 = 0x4c433032
+	// LCMFragmentedHeaderMagic is the LCM fragmented message header magic number
+	LCMFragmentedHeaderMagic uint32 = 0x4c433033
+)
+
+// LCM (Lightweight Communications and Marshalling) is a set of libraries and
+// tools for message passing and data marshalling, targeted at real-time systems
+// where high bandwidth and low latency are critical. It provides a
+// publish/subscribe message passing model and automatic
+// marshalling/unmarshalling code generation with bindings for applications in a
+// variety of programming languages.
+//
+// References
+//   https://lcm-proj.github.io/
+//   https://github.com/lcm-proj/lcm
+type LCM struct {
+	// Common (short & fragmented header) fields
+	Magic          uint32
+	SequenceNumber uint32
+	// Fragmented header only fields
+	PayloadSize    uint32
+	FragmentOffset uint32
+	FragmentNumber uint16
+	TotalFragments uint16
+	// Common field
+	ChannelName string
+	// Gopacket helper fields
+	Fragmented  bool
+	fingerprint LCMFingerprint
+	contents    []byte
+	payload     []byte
+}
+
+// LCMFingerprint is the type of an LCM fingerprint.
+type LCMFingerprint uint64
+
+var (
+	// lcmLayerTypes contains a map of all LCM fingerprints that we support and
+	// their LayerType
+	lcmLayerTypes  = map[LCMFingerprint]gopacket.LayerType{}
+	layerTypeIndex = 1001
+)
+
+// RegisterLCMLayerType allows users to register decoders for the underlying
+// LCM payload. This is done based on the fingerprint that every LCM message
+// contains and which identifies it uniquely. If num is non-zero it is passed
+// straight to gopacket's RegisterLayerType; otherwise an incremental value
+// starting from 1001 is used.
+func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint,
+	decoder gopacket.Decoder) gopacket.LayerType {
+	metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder}
+
+	if num == 0 {
+		num = layerTypeIndex
+		layerTypeIndex++
+	}
+
+	lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata)
+
+	return lcmLayerTypes[fingerprint]
+}
+
+// SupportedLCMFingerprints returns a slice of all LCM fingerprints that have
+// been registered so far.
+func SupportedLCMFingerprints() []LCMFingerprint {
+	fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes))
+	for fp := range lcmLayerTypes {
+		fingerprints = append(fingerprints, fp)
+	}
+	return fingerprints
+}
+
+// GetLCMLayerType returns the underlying LCM message's LayerType.
+// This LayerType has to be registered by using RegisterLCMLayerType.
+func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType {
+	layerType, ok := lcmLayerTypes[fingerprint]
+	if !ok {
+		return gopacket.LayerTypePayload
+	}
+
+	return layerType
+}
+
+func decodeLCM(data []byte, p gopacket.PacketBuilder) error {
+	lcm := &LCM{}
+
+	err := lcm.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	p.AddLayer(lcm)
+	p.SetApplicationLayer(lcm)
+
+	return p.NextDecoder(lcm.NextLayerType())
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	offset := 0
+
+	lcm.Magic = binary.BigEndian.Uint32(data[offset:4])
+	offset += 4
+
+	if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic {
+		return fmt.Errorf("Received LCM header magic %v does not match known "+
+			"LCM magic numbers. Dropping packet.", lcm.Magic)
+	}
+
+	lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8])
+	offset += 4
+
+	if lcm.Magic == LCMFragmentedHeaderMagic {
+		lcm.Fragmented = true
+
+		lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+
+		lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+
+		lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2])
+		offset += 2
+
+		lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	} else {
+		lcm.Fragmented = false
+	}
+
+	if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+		buffer := make([]byte, 0)
+		for _, b := range data[offset:] {
+			offset++
+
+			if b == 0 {
+				break
+			}
+
+			buffer = append(buffer, b)
+		}
+
+		lcm.ChannelName = string(buffer)
+	}
+
+	lcm.fingerprint = LCMFingerprint(
+		binary.BigEndian.Uint64(data[offset : offset+8]))
+
+	lcm.contents = data[:offset]
+	lcm.payload = data[offset:]
+
+	return nil
+}
+
+// CanDecode returns a set of layers that LCM objects can decode.
+// As LCM objects can only decode the LCM layer, we just return that layer.
+func (lcm LCM) CanDecode() gopacket.LayerClass {
+	return LayerTypeLCM
+}
+
+// NextLayerType specifies the LCM payload layer type following this header.
+// As LCM packets are serialized structs with a unique fingerprint for each
+// combination of data types, the lookup of the correct layer type is based on
+// that fingerprint.
+func (lcm LCM) NextLayerType() gopacket.LayerType {
+	if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+		return GetLCMLayerType(lcm.fingerprint)
+	}
+
+	return gopacket.LayerTypeFragment
+}
+
+// LayerType returns LayerTypeLCM
+func (lcm LCM) LayerType() gopacket.LayerType {
+	return LayerTypeLCM
+}
+
+// LayerContents returns the contents of the LCM header.
+func (lcm LCM) LayerContents() []byte {
+	return lcm.contents
+}
+
+// LayerPayload returns the payload following this LCM header.
+func (lcm LCM) LayerPayload() []byte {
+	return lcm.payload
+}
+
+// Payload returns the payload following this LCM header.
+func (lcm LCM) Payload() []byte {
+	return lcm.LayerPayload()
+}
+
+// Fingerprint returns the LCM fingerprint of the underlying message.
+func (lcm LCM) Fingerprint() LCMFingerprint {
+	return lcm.fingerprint
+}
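Because decodeLCM dispatches on the message fingerprint, an LCM payload only becomes decodable after its fingerprint has been registered. A minimal registration sketch; the fingerprint value, layer name, and stub decoder below are placeholders rather than anything shipped in this change:

package main

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// exampleFingerprint is a placeholder; real values come from the generated
// LCM bindings for a concrete message type.
const exampleFingerprint layers.LCMFingerprint = 0x1234567890abcdef

func init() {
	// Passing num == 0 lets the library pick the next free layer-type number
	// (starting at 1001).
	layers.RegisterLCMLayerType(0, "ExampleLCMMessage", exampleFingerprint,
		gopacket.DecodeFunc(func(data []byte, p gopacket.PacketBuilder) error {
			// A real decoder would unmarshal the generated struct here; this
			// stub just hands the bytes to the generic payload layer.
			return gopacket.LayerTypePayload.Decode(data, p)
		}))
}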
diff --git a/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/google/gopacket/layers/linux_sll.go
new file mode 100644
index 0000000..85a4f8b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/linux_sll.go
@@ -0,0 +1,98 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// LinuxSLLPacketType is the packet type field of a Linux cooked-capture (SLL) header.
+type LinuxSLLPacketType uint16
+
+const (
+	LinuxSLLPacketTypeHost      LinuxSLLPacketType = 0 // To us
+	LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all
+	LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group
+	LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else
+	LinuxSLLPacketTypeOutgoing  LinuxSLLPacketType = 4 // Outgoing of any type
+	// These ones are invisible at the user level
+	LinuxSLLPacketTypeLoopback  LinuxSLLPacketType = 5 // MC/BRD frame looped back
+	LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame
+)
+
+func (l LinuxSLLPacketType) String() string {
+	switch l {
+	case LinuxSLLPacketTypeHost:
+		return "host"
+	case LinuxSLLPacketTypeBroadcast:
+		return "broadcast"
+	case LinuxSLLPacketTypeMulticast:
+		return "multicast"
+	case LinuxSLLPacketTypeOtherhost:
+		return "otherhost"
+	case LinuxSLLPacketTypeOutgoing:
+		return "outgoing"
+	case LinuxSLLPacketTypeLoopback:
+		return "loopback"
+	case LinuxSLLPacketTypeFastroute:
+		return "fastroute"
+	}
+	return fmt.Sprintf("Unknown(%d)", int(l))
+}
+
+// LinuxSLL is the layer used for Linux "cooked" capture (SLL) headers, as
+// produced when capturing on the "any" pseudo-interface.
+type LinuxSLL struct {
+	BaseLayer
+	PacketType   LinuxSLLPacketType
+	AddrLen      uint16
+	Addr         net.HardwareAddr
+	EthernetType EthernetType
+	AddrType     uint16
+}
+
+// LayerType returns LayerTypeLinuxSLL.
+func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL }
+
+func (sll *LinuxSLL) CanDecode() gopacket.LayerClass {
+	return LayerTypeLinuxSLL
+}
+
+func (sll *LinuxSLL) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, sll.Addr, nil)
+}
+
+func (sll *LinuxSLL) NextLayerType() gopacket.LayerType {
+	return sll.EthernetType.LayerType()
+}
+
+func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 16 {
+		return errors.New("Linux SLL packet too small")
+	}
+	sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2]))
+	sll.AddrType = binary.BigEndian.Uint16(data[2:4])
+	sll.AddrLen = binary.BigEndian.Uint16(data[4:6])
+
+	sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6])
+	sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16]))
+	sll.BaseLayer = BaseLayer{data[:16], data[16:]}
+
+	return nil
+}
+
+func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error {
+	sll := &LinuxSLL{}
+	if err := sll.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(sll)
+	p.SetLinkLayer(sll)
+	return p.NextDecoder(sll.EthernetType)
+}
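Captures taken on Linux's "any" pseudo-interface carry this cooked (SLL) header instead of Ethernet, so decoding has to start at LayerTypeLinuxSLL. A minimal sketch, assuming the frame bytes come from a pcap whose link type is LINUX_SLL:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// decodeCooked decodes one cooked-capture frame and prints the originating
// address, direction and next protocol.
func decodeCooked(frame []byte) {
	pkt := gopacket.NewPacket(frame, layers.LayerTypeLinuxSLL, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeLinuxSLL); l != nil {
		sll := l.(*layers.LinuxSLL)
		fmt.Printf("%v frame from %v, next protocol %v\n",
			sll.PacketType, sll.Addr, sll.EthernetType)
	}
}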
diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go
new file mode 100644
index 0000000..cad6803
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/llc.go
@@ -0,0 +1,193 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// LLC is the layer used for 802.2 Logical Link Control headers.
+// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf
+type LLC struct {
+	BaseLayer
+	DSAP    uint8
+	IG      bool // true means group, false means individual
+	SSAP    uint8
+	CR      bool // true means response, false means command
+	Control uint16
+}
+
+// LayerType returns gopacket.LayerTypeLLC.
+func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 3 {
+		return errors.New("LLC header too small")
+	}
+	l.DSAP = data[0] & 0xFE
+	l.IG = data[0]&0x1 != 0
+	l.SSAP = data[1] & 0xFE
+	l.CR = data[1]&0x1 != 0
+	l.Control = uint16(data[2])
+
+	if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 {
+		if len(data) < 4 {
+			return errors.New("LLC header too small")
+		}
+		l.Control = l.Control<<8 | uint16(data[3])
+		l.Contents = data[:4]
+		l.Payload = data[4:]
+	} else {
+		l.Contents = data[:3]
+		l.Payload = data[3:]
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *LLC) CanDecode() gopacket.LayerClass {
+	return LayerTypeLLC
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *LLC) NextLayerType() gopacket.LayerType {
+	switch {
+	case l.DSAP == 0xAA && l.SSAP == 0xAA:
+		return LayerTypeSNAP
+	case l.DSAP == 0x42 && l.SSAP == 0x42:
+		return LayerTypeSTP
+	}
+	return gopacket.LayerTypeZero // Not implemented
+}
+
+// SNAP is used inside LLC.  See
+// http://standards.ieee.org/getieee802/download/802-2001.pdf.
+// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol:
+//  "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing,
+//  on networks using IEEE 802.2 LLC, more protocols than can be distinguished
+//  by the 8-bit 802.2 Service Access Point (SAP) fields."
+type SNAP struct {
+	BaseLayer
+	OrganizationalCode []byte
+	Type               EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeSNAP.
+func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 5 {
+		return errors.New("SNAP header too small")
+	}
+	s.OrganizationalCode = data[:3]
+	s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5]))
+	s.BaseLayer = BaseLayer{data[:5], data[5:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *SNAP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSNAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (s *SNAP) NextLayerType() gopacket.LayerType {
+	// See BUG(gconnell) in decodeSNAP
+	return s.Type.LayerType()
+}
+
+func decodeLLC(data []byte, p gopacket.PacketBuilder) error {
+	l := &LLC{}
+	err := l.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(l)
+	return p.NextDecoder(l.NextLayerType())
+}
+
+func decodeSNAP(data []byte, p gopacket.PacketBuilder) error {
+	s := &SNAP{}
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	// BUG(gconnell):  When decoding SNAP, we treat the SNAP type as an Ethernet
+	// type.  This may not actually be an ethernet type in all cases,
+	// depending on the organizational code.  Right now, we don't check.
+	return p.NextDecoder(s.Type)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var igFlag, crFlag byte
+	var length int
+
+	if l.Control&0xFF00 != 0 {
+		length = 4
+	} else {
+		length = 3
+	}
+
+	if l.DSAP&0x1 != 0 {
+		return errors.New("DSAP value invalid, should not include IG flag bit")
+	}
+
+	if l.SSAP&0x1 != 0 {
+		return errors.New("SSAP value invalid, should not include CR flag bit")
+	}
+
+	if buf, err := b.PrependBytes(length); err != nil {
+		return err
+	} else {
+		igFlag = 0
+		if l.IG {
+			igFlag = 0x1
+		}
+
+		crFlag = 0
+		if l.CR {
+			crFlag = 0x1
+		}
+
+		buf[0] = l.DSAP + igFlag
+		buf[1] = l.SSAP + crFlag
+
+		if length == 4 {
+			buf[2] = uint8(l.Control >> 8)
+			buf[3] = uint8(l.Control)
+		} else {
+			buf[2] = uint8(l.Control)
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if buf, err := b.PrependBytes(5); err != nil {
+		return err
+	} else {
+		buf[0] = s.OrganizationalCode[0]
+		buf[1] = s.OrganizationalCode[1]
+		buf[2] = s.OrganizationalCode[2]
+		binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type))
+	}
+
+	return nil
+}
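Since LLC and SNAP both implement SerializeTo, an 802.2/SNAP encapsulation can be assembled with gopacket.SerializeLayers. A minimal sketch; the SNAP organizational code, the EthernetTypeIPv4 choice and the payload are illustrative values:

package main

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildLLCSNAP serializes an LLC header with a SNAP extension in front of an
// arbitrary payload and returns the resulting bytes.
func buildLLCSNAP(payload []byte) ([]byte, error) {
	llc := &layers.LLC{
		DSAP:    0xAA, // SNAP
		SSAP:    0xAA, // SNAP
		Control: 0x03, // unnumbered information (UI)
	}
	snap := &layers.SNAP{
		OrganizationalCode: []byte{0, 0, 0},
		Type:               layers.EthernetTypeIPv4,
	}
	buf := gopacket.NewSerializeBuffer()
	err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
		llc, snap, gopacket.Payload(payload))
	return buf.Bytes(), err
}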
diff --git a/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/google/gopacket/layers/lldp.go
new file mode 100644
index 0000000..e128260
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lldp.go
@@ -0,0 +1,1585 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet.
+type LLDPTLVType byte
+
+const (
+	LLDPTLVEnd             LLDPTLVType = 0
+	LLDPTLVChassisID       LLDPTLVType = 1
+	LLDPTLVPortID          LLDPTLVType = 2
+	LLDPTLVTTL             LLDPTLVType = 3
+	LLDPTLVPortDescription LLDPTLVType = 4
+	LLDPTLVSysName         LLDPTLVType = 5
+	LLDPTLVSysDescription  LLDPTLVType = 6
+	LLDPTLVSysCapabilities LLDPTLVType = 7
+	LLDPTLVMgmtAddress     LLDPTLVType = 8
+	LLDPTLVOrgSpecific     LLDPTLVType = 127
+)
+
+// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer.
+type LinkLayerDiscoveryValue struct {
+	Type   LLDPTLVType
+	Length uint16
+	Value  []byte
+}
+
+func (c *LinkLayerDiscoveryValue) len() int {
+	return 0
+}
+
+// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID
+type LLDPChassisIDSubType byte
+
+// LLDP Chassis Types
+const (
+	LLDPChassisIDSubTypeReserved    LLDPChassisIDSubType = 0
+	LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1
+	LLDPChassisIDSubtypeIfaceAlias  LLDPChassisIDSubType = 2
+	LLDPChassisIDSubTypePortComp    LLDPChassisIDSubType = 3
+	LLDPChassisIDSubTypeMACAddr     LLDPChassisIDSubType = 4
+	LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5
+	LLDPChassisIDSubtypeIfaceName   LLDPChassisIDSubType = 6
+	LLDPChassisIDSubTypeLocal       LLDPChassisIDSubType = 7
+)
+
+type LLDPChassisID struct {
+	Subtype LLDPChassisIDSubType
+	ID      []byte
+}
+
+func (c *LLDPChassisID) serialize() []byte {
+
+	var buf = make([]byte, c.serializedLen())
+	idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) // type takes the top 7 bits, length the bottom 9; +1 to the length for the subtype byte
+	binary.BigEndian.PutUint16(buf[0:2], idLen)
+	buf[2] = byte(c.Subtype)
+	copy(buf[3:], c.ID)
+	return buf
+}
+
+func (c *LLDPChassisID) serializedLen() int {
+	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
+
+// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID
+type LLDPPortIDSubType byte
+
+// LLDP PortID types
+const (
+	LLDPPortIDSubtypeReserved       LLDPPortIDSubType = 0
+	LLDPPortIDSubtypeIfaceAlias     LLDPPortIDSubType = 1
+	LLDPPortIDSubtypePortComp       LLDPPortIDSubType = 2
+	LLDPPortIDSubtypeMACAddr        LLDPPortIDSubType = 3
+	LLDPPortIDSubtypeNetworkAddr    LLDPPortIDSubType = 4
+	LLDPPortIDSubtypeIfaceName      LLDPPortIDSubType = 5
+	LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6
+	LLDPPortIDSubtypeLocal          LLDPPortIDSubType = 7
+)
+
+type LLDPPortID struct {
+	Subtype LLDPPortIDSubType
+	ID      []byte
+}
+
+func (c *LLDPPortID) serialize() []byte {
+
+	var buf = make([]byte, c.serializedLen())
+	idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) // type takes the top 7 bits, length the bottom 9; +1 to the length for the subtype byte
+	binary.BigEndian.PutUint16(buf[0:2], idLen)
+	buf[2] = byte(c.Subtype)
+	copy(buf[3:], c.ID)
+	return buf
+}
+
+func (c *LLDPPortID) serializedLen() int {
+	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
+
+// LinkLayerDiscovery is a packet layer containing the LinkLayer Discovery Protocol.
+// See http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf
+// ChassisID, PortID and TTL are mandatory TLVs. Other values can be decoded
+// with DecodeValues()
+type LinkLayerDiscovery struct {
+	BaseLayer
+	ChassisID LLDPChassisID
+	PortID    LLDPPortID
+	TTL       uint16
+	Values    []LinkLayerDiscoveryValue
+}
+
+type IEEEOUI uint32
+
+// http://standards.ieee.org/develop/regauth/oui/oui.txt
+const (
+	IEEEOUI8021     IEEEOUI = 0x0080c2
+	IEEEOUI8023     IEEEOUI = 0x00120f
+	IEEEOUI80211    IEEEOUI = 0x000fac
+	IEEEOUI8021Qbg  IEEEOUI = 0x0013BF
+	IEEEOUICisco2   IEEEOUI = 0x000142
+	IEEEOUIMedia    IEEEOUI = 0x0012bb // TR-41
+	IEEEOUIProfinet IEEEOUI = 0x000ecf
+	IEEEOUIDCBX     IEEEOUI = 0x001b21
+)
+
+// LLDPOrgSpecificTLV is an Organisation-specific TLV
+type LLDPOrgSpecificTLV struct {
+	OUI     IEEEOUI
+	SubType uint8
+	Info    []byte
+}
+
+// LLDPCapabilities Types
+const (
+	LLDPCapsOther       uint16 = 1 << 0
+	LLDPCapsRepeater    uint16 = 1 << 1
+	LLDPCapsBridge      uint16 = 1 << 2
+	LLDPCapsWLANAP      uint16 = 1 << 3
+	LLDPCapsRouter      uint16 = 1 << 4
+	LLDPCapsPhone       uint16 = 1 << 5
+	LLDPCapsDocSis      uint16 = 1 << 6
+	LLDPCapsStationOnly uint16 = 1 << 7
+	LLDPCapsCVLAN       uint16 = 1 << 8
+	LLDPCapsSVLAN       uint16 = 1 << 9
+	LLDPCapsTmpr        uint16 = 1 << 10
+)
+
+// LLDPCapabilities represents the capabilities of a device
+type LLDPCapabilities struct {
+	Other       bool
+	Repeater    bool
+	Bridge      bool
+	WLANAP      bool
+	Router      bool
+	Phone       bool
+	DocSis      bool
+	StationOnly bool
+	CVLAN       bool
+	SVLAN       bool
+	TMPR        bool
+}
+
+type LLDPSysCapabilities struct {
+	SystemCap  LLDPCapabilities
+	EnabledCap LLDPCapabilities
+}
+
+type IANAAddressFamily byte
+
+// LLDP Management Address Subtypes
+// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml
+const (
+	IANAAddressFamilyReserved IANAAddressFamily = 0
+	IANAAddressFamilyIPV4     IANAAddressFamily = 1
+	IANAAddressFamilyIPV6     IANAAddressFamily = 2
+	IANAAddressFamilyNSAP     IANAAddressFamily = 3
+	IANAAddressFamilyHDLC     IANAAddressFamily = 4
+	IANAAddressFamilyBBN1822  IANAAddressFamily = 5
+	IANAAddressFamily802      IANAAddressFamily = 6
+	IANAAddressFamilyE163     IANAAddressFamily = 7
+	IANAAddressFamilyE164     IANAAddressFamily = 8
+	IANAAddressFamilyF69      IANAAddressFamily = 9
+	IANAAddressFamilyX121     IANAAddressFamily = 10
+	IANAAddressFamilyIPX      IANAAddressFamily = 11
+	IANAAddressFamilyAtalk    IANAAddressFamily = 12
+	IANAAddressFamilyDecnet   IANAAddressFamily = 13
+	IANAAddressFamilyBanyan   IANAAddressFamily = 14
+	IANAAddressFamilyE164NSAP IANAAddressFamily = 15
+	IANAAddressFamilyDNS      IANAAddressFamily = 16
+	IANAAddressFamilyDistname IANAAddressFamily = 17
+	IANAAddressFamilyASNumber IANAAddressFamily = 18
+	IANAAddressFamilyXTPIPV4  IANAAddressFamily = 19
+	IANAAddressFamilyXTPIPV6  IANAAddressFamily = 20
+	IANAAddressFamilyXTP      IANAAddressFamily = 21
+	IANAAddressFamilyFcWWPN   IANAAddressFamily = 22
+	IANAAddressFamilyFcWWNN   IANAAddressFamily = 23
+	IANAAddressFamilyGWID     IANAAddressFamily = 24
+	IANAAddressFamilyL2VPN    IANAAddressFamily = 25
+)
+
+type LLDPInterfaceSubtype byte
+
+// LLDP Interface Subtypes
+const (
+	LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1
+	LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2
+	LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3
+)
+
+type LLDPMgmtAddress struct {
+	Subtype          IANAAddressFamily
+	Address          []byte
+	InterfaceSubtype LLDPInterfaceSubtype
+	InterfaceNumber  uint32
+	OID              string
+}
+
+// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues.
+// Organisation-specific TLVs can be decoded using the various Decode() methods.
+type LinkLayerDiscoveryInfo struct {
+	BaseLayer
+	PortDescription string
+	SysName         string
+	SysDescription  string
+	SysCapabilities LLDPSysCapabilities
+	MgmtAddress     LLDPMgmtAddress
+	OrgTLVs         []LLDPOrgSpecificTLV      // Private TLVs
+	Unknown         []LinkLayerDiscoveryValue // undecoded TLVs
+}
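The mandatory TLVs stay on the LinkLayerDiscovery layer itself; everything else lands here and is unpacked on demand through the Decode*() methods further down. A minimal sketch of reading both layers from a captured frame, assuming the capture starts with an Ethernet header:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// printLLDPNeighbor prints the mandatory LLDP fields and, if advertised, the
// 802.3 MTU from a single captured frame.
func printLLDPNeighbor(frame []byte) {
	pkt := gopacket.NewPacket(frame, layers.LayerTypeEthernet, gopacket.Default)
	lldpLayer := pkt.Layer(layers.LayerTypeLinkLayerDiscovery)
	infoLayer := pkt.Layer(layers.LayerTypeLinkLayerDiscoveryInfo)
	if lldpLayer == nil || infoLayer == nil {
		return
	}
	lldp := lldpLayer.(*layers.LinkLayerDiscovery)
	info := infoLayer.(*layers.LinkLayerDiscoveryInfo)
	fmt.Printf("chassis %x port %x ttl %d sys %q\n",
		lldp.ChassisID.ID, lldp.PortID.ID, lldp.TTL, info.SysName)
	if d, err := info.Decode8023(); err == nil && d.MTU != 0 {
		fmt.Println("advertised MTU:", d.MTU)
	}
}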
+
+// IEEE 802.1 TLV Subtypes
+const (
+	LLDP8021SubtypePortVLANID       uint8 = 1
+	LLDP8021SubtypeProtocolVLANID   uint8 = 2
+	LLDP8021SubtypeVLANName         uint8 = 3
+	LLDP8021SubtypeProtocolIdentity uint8 = 4
+	LLDP8021SubtypeVDIUsageDigest   uint8 = 5
+	LLDP8021SubtypeManagementVID    uint8 = 6
+	LLDP8021SubtypeLinkAggregation  uint8 = 7
+)
+
+// VLAN Port Protocol ID options
+const (
+	LLDPProtocolVLANIDCapability byte = 1 << 1
+	LLDPProtocolVLANIDStatus     byte = 1 << 2
+)
+
+type PortProtocolVLANID struct {
+	Supported bool
+	Enabled   bool
+	ID        uint16
+}
+
+type VLANName struct {
+	ID   uint16
+	Name string
+}
+
+type ProtocolIdentity []byte
+
+// LACP options
+const (
+	LLDPAggregationCapability byte = 1 << 0
+	LLDPAggregationStatus     byte = 1 << 1
+)
+
+// IEEE 802 Link Aggregation parameters
+type LLDPLinkAggregation struct {
+	Supported bool
+	Enabled   bool
+	PortID    uint32
+}
+
+// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs
+type LLDPInfo8021 struct {
+	PVID               uint16
+	PPVIDs             []PortProtocolVLANID
+	VLANNames          []VLANName
+	ProtocolIdentities []ProtocolIdentity
+	VIDUsageDigest     uint32
+	ManagementVID      uint16
+	LinkAggregation    LLDPLinkAggregation
+}
+
+// IEEE 802.3 TLV Subtypes
+const (
+	LLDP8023SubtypeMACPHY          uint8 = 1
+	LLDP8023SubtypeMDIPower        uint8 = 2
+	LLDP8023SubtypeLinkAggregation uint8 = 3
+	LLDP8023SubtypeMTU             uint8 = 4
+)
+
+// MACPHY options
+const (
+	LLDPMACPHYCapability byte = 1 << 0
+	LLDPMACPHYStatus     byte = 1 << 1
+)
+
+// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType
+const (
+	LLDPMAUTypeUnknown         uint16 = 0
+	LLDPMAUTypeAUI             uint16 = 1
+	LLDPMAUType10Base5         uint16 = 2
+	LLDPMAUTypeFOIRL           uint16 = 3
+	LLDPMAUType10Base2         uint16 = 4
+	LLDPMAUType10BaseT         uint16 = 5
+	LLDPMAUType10BaseFP        uint16 = 6
+	LLDPMAUType10BaseFB        uint16 = 7
+	LLDPMAUType10BaseFL        uint16 = 8
+	LLDPMAUType10BROAD36       uint16 = 9
+	LLDPMAUType10BaseT_HD      uint16 = 10
+	LLDPMAUType10BaseT_FD      uint16 = 11
+	LLDPMAUType10BaseFL_HD     uint16 = 12
+	LLDPMAUType10BaseFL_FD     uint16 = 13
+	LLDPMAUType100BaseT4       uint16 = 14
+	LLDPMAUType100BaseTX_HD    uint16 = 15
+	LLDPMAUType100BaseTX_FD    uint16 = 16
+	LLDPMAUType100BaseFX_HD    uint16 = 17
+	LLDPMAUType100BaseFX_FD    uint16 = 18
+	LLDPMAUType100BaseT2_HD    uint16 = 19
+	LLDPMAUType100BaseT2_FD    uint16 = 20
+	LLDPMAUType1000BaseX_HD    uint16 = 21
+	LLDPMAUType1000BaseX_FD    uint16 = 22
+	LLDPMAUType1000BaseLX_HD   uint16 = 23
+	LLDPMAUType1000BaseLX_FD   uint16 = 24
+	LLDPMAUType1000BaseSX_HD   uint16 = 25
+	LLDPMAUType1000BaseSX_FD   uint16 = 26
+	LLDPMAUType1000BaseCX_HD   uint16 = 27
+	LLDPMAUType1000BaseCX_FD   uint16 = 28
+	LLDPMAUType1000BaseT_HD    uint16 = 29
+	LLDPMAUType1000BaseT_FD    uint16 = 30
+	LLDPMAUType10GBaseX        uint16 = 31
+	LLDPMAUType10GBaseLX4      uint16 = 32
+	LLDPMAUType10GBaseR        uint16 = 33
+	LLDPMAUType10GBaseER       uint16 = 34
+	LLDPMAUType10GBaseLR       uint16 = 35
+	LLDPMAUType10GBaseSR       uint16 = 36
+	LLDPMAUType10GBaseW        uint16 = 37
+	LLDPMAUType10GBaseEW       uint16 = 38
+	LLDPMAUType10GBaseLW       uint16 = 39
+	LLDPMAUType10GBaseSW       uint16 = 40
+	LLDPMAUType10GBaseCX4      uint16 = 41
+	LLDPMAUType2BaseTL         uint16 = 42
+	LLDPMAUType10PASS_TS       uint16 = 43
+	LLDPMAUType100BaseBX10D    uint16 = 44
+	LLDPMAUType100BaseBX10U    uint16 = 45
+	LLDPMAUType100BaseLX10     uint16 = 46
+	LLDPMAUType1000BaseBX10D   uint16 = 47
+	LLDPMAUType1000BaseBX10U   uint16 = 48
+	LLDPMAUType1000BaseLX10    uint16 = 49
+	LLDPMAUType1000BasePX10D   uint16 = 50
+	LLDPMAUType1000BasePX10U   uint16 = 51
+	LLDPMAUType1000BasePX20D   uint16 = 52
+	LLDPMAUType1000BasePX20U   uint16 = 53
+	LLDPMAUType10GBaseT        uint16 = 54
+	LLDPMAUType10GBaseLRM      uint16 = 55
+	LLDPMAUType1000BaseKX      uint16 = 56
+	LLDPMAUType10GBaseKX4      uint16 = 57
+	LLDPMAUType10GBaseKR       uint16 = 58
+	LLDPMAUType10_1GBasePRX_D1 uint16 = 59
+	LLDPMAUType10_1GBasePRX_D2 uint16 = 60
+	LLDPMAUType10_1GBasePRX_D3 uint16 = 61
+	LLDPMAUType10_1GBasePRX_U1 uint16 = 62
+	LLDPMAUType10_1GBasePRX_U2 uint16 = 63
+	LLDPMAUType10_1GBasePRX_U3 uint16 = 64
+	LLDPMAUType10GBasePR_D1    uint16 = 65
+	LLDPMAUType10GBasePR_D2    uint16 = 66
+	LLDPMAUType10GBasePR_D3    uint16 = 67
+	LLDPMAUType10GBasePR_U1    uint16 = 68
+	LLDPMAUType10GBasePR_U3    uint16 = 69
+)
+
+// From RFC 3636 - ifMauAutoNegCapAdvertisedBits
+const (
+	LLDPMAUPMDOther        uint16 = 1 << 15
+	LLDPMAUPMD10BaseT      uint16 = 1 << 14
+	LLDPMAUPMD10BaseT_FD   uint16 = 1 << 13
+	LLDPMAUPMD100BaseT4    uint16 = 1 << 12
+	LLDPMAUPMD100BaseTX    uint16 = 1 << 11
+	LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10
+	LLDPMAUPMD100BaseT2    uint16 = 1 << 9
+	LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8
+	LLDPMAUPMDFDXPAUSE     uint16 = 1 << 7
+	LLDPMAUPMDFDXAPAUSE    uint16 = 1 << 6
+	LLDPMAUPMDFDXSPAUSE    uint16 = 1 << 5
+	LLDPMAUPMDFDXBPAUSE    uint16 = 1 << 4
+	LLDPMAUPMD1000BaseX    uint16 = 1 << 3
+	LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2
+	LLDPMAUPMD1000BaseT    uint16 = 1 << 1
+	LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0
+)
+
+// Inverted ifMauAutoNegCapAdvertisedBits if required
+// (Some manufacturers misinterpreted the spec -
+// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455)
+const (
+	LLDPMAUPMDOtherInv        uint16 = 1 << 0
+	LLDPMAUPMD10BaseTInv      uint16 = 1 << 1
+	LLDPMAUPMD10BaseT_FDInv   uint16 = 1 << 2
+	LLDPMAUPMD100BaseT4Inv    uint16 = 1 << 3
+	LLDPMAUPMD100BaseTXInv    uint16 = 1 << 4
+	LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5
+	LLDPMAUPMD100BaseT2Inv    uint16 = 1 << 6
+	LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7
+	LLDPMAUPMDFDXPAUSEInv     uint16 = 1 << 8
+	LLDPMAUPMDFDXAPAUSEInv    uint16 = 1 << 9
+	LLDPMAUPMDFDXSPAUSEInv    uint16 = 1 << 10
+	LLDPMAUPMDFDXBPAUSEInv    uint16 = 1 << 11
+	LLDPMAUPMD1000BaseXInv    uint16 = 1 << 12
+	LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13
+	LLDPMAUPMD1000BaseTInv    uint16 = 1 << 14
+	LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15
+)
+
+type LLDPMACPHYConfigStatus struct {
+	AutoNegSupported  bool
+	AutoNegEnabled    bool
+	AutoNegCapability uint16
+	MAUType           uint16
+}
+
+// MDI Power options
+const (
+	LLDPMDIPowerPortClass    byte = 1 << 0
+	LLDPMDIPowerCapability   byte = 1 << 1
+	LLDPMDIPowerStatus       byte = 1 << 2
+	LLDPMDIPowerPairsAbility byte = 1 << 3
+)
+
+type LLDPPowerType byte
+
+type LLDPPowerSource byte
+
+type LLDPPowerPriority byte
+
+const (
+	LLDPPowerPriorityUnknown LLDPPowerPriority = 0
+	LLDPPowerPriorityMedium  LLDPPowerPriority = 1
+	LLDPPowerPriorityHigh    LLDPPowerPriority = 2
+	LLDPPowerPriorityLow     LLDPPowerPriority = 3
+)
+
+type LLDPPowerViaMDI8023 struct {
+	PortClassPSE    bool // false = PD
+	PSESupported    bool
+	PSEEnabled      bool
+	PSEPairsAbility bool
+	PSEPowerPair    uint8
+	PSEClass        uint8
+	Type            LLDPPowerType
+	Source          LLDPPowerSource
+	Priority        LLDPPowerPriority
+	Requested       uint16 // 1-510 Watts
+	Allocated       uint16 // 1-510 Watts
+}
+
+// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs
+type LLDPInfo8023 struct {
+	MACPHYConfigStatus LLDPMACPHYConfigStatus
+	PowerViaMDI        LLDPPowerViaMDI8023
+	LinkAggregation    LLDPLinkAggregation
+	MTU                uint16
+}
+
+// IEEE 802.1Qbg TLV Subtypes
+const (
+	LLDP8021QbgEVB   uint8 = 0
+	LLDP8021QbgCDCP  uint8 = 1
+	LLDP8021QbgVDP   uint8 = 2
+	LLDP8021QbgEVB22 uint8 = 13
+)
+
+// LLDPEVBCapabilities Types
+const (
+	LLDPEVBCapsSTD uint16 = 1 << 7
+	LLDPEVBCapsRR  uint16 = 1 << 6
+	LLDPEVBCapsRTE uint16 = 1 << 2
+	LLDPEVBCapsECP uint16 = 1 << 1
+	LLDPEVBCapsVDP uint16 = 1 << 0
+)
+
+// LLDPEVBCapabilities represents the EVB capabilities of a device
+type LLDPEVBCapabilities struct {
+	StandardBridging            bool
+	ReflectiveRelay             bool
+	RetransmissionTimerExponent bool
+	EdgeControlProtocol         bool
+	VSIDiscoveryProtocol        bool
+}
+
+type LLDPEVBSettings struct {
+	Supported      LLDPEVBCapabilities
+	Enabled        LLDPEVBCapabilities
+	SupportedVSIs  uint16
+	ConfiguredVSIs uint16
+	RTEExponent    uint8
+}
+
+// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs
+type LLDPInfo8021Qbg struct {
+	EVBSettings LLDPEVBSettings
+}
+
+type LLDPMediaSubtype uint8
+
+// Media TLV Subtypes
+const (
+	LLDPMediaTypeCapabilities LLDPMediaSubtype = 1
+	LLDPMediaTypeNetwork      LLDPMediaSubtype = 2
+	LLDPMediaTypeLocation     LLDPMediaSubtype = 3
+	LLDPMediaTypePower        LLDPMediaSubtype = 4
+	LLDPMediaTypeHardware     LLDPMediaSubtype = 5
+	LLDPMediaTypeFirmware     LLDPMediaSubtype = 6
+	LLDPMediaTypeSoftware     LLDPMediaSubtype = 7
+	LLDPMediaTypeSerial       LLDPMediaSubtype = 8
+	LLDPMediaTypeManufacturer LLDPMediaSubtype = 9
+	LLDPMediaTypeModel        LLDPMediaSubtype = 10
+	LLDPMediaTypeAssetID      LLDPMediaSubtype = 11
+)
+
+type LLDPMediaClass uint8
+
+// Media Class Values
+const (
+	LLDPMediaClassUndefined   LLDPMediaClass = 0
+	LLDPMediaClassEndpointI   LLDPMediaClass = 1
+	LLDPMediaClassEndpointII  LLDPMediaClass = 2
+	LLDPMediaClassEndpointIII LLDPMediaClass = 3
+	LLDPMediaClassNetwork     LLDPMediaClass = 4
+)
+
+// LLDPMediaCapabilities Types
+const (
+	LLDPMediaCapsLLDP      uint16 = 1 << 0
+	LLDPMediaCapsNetwork   uint16 = 1 << 1
+	LLDPMediaCapsLocation  uint16 = 1 << 2
+	LLDPMediaCapsPowerPSE  uint16 = 1 << 3
+	LLDPMediaCapsPowerPD   uint16 = 1 << 4
+	LLDPMediaCapsInventory uint16 = 1 << 5
+)
+
+// LLDPMediaCapabilities represents the LLDP Media capabilities of a device
+type LLDPMediaCapabilities struct {
+	Capabilities  bool
+	NetworkPolicy bool
+	Location      bool
+	PowerPSE      bool
+	PowerPD       bool
+	Inventory     bool
+	Class         LLDPMediaClass
+}
+
+type LLDPApplicationType uint8
+
+const (
+	LLDPAppTypeReserved            LLDPApplicationType = 0
+	LLDPAppTypeVoice               LLDPApplicationType = 1
+	LLDPappTypeVoiceSignaling      LLDPApplicationType = 2
+	LLDPappTypeGuestVoice          LLDPApplicationType = 3
+	LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4
+	LLDPappTypeSoftphoneVoice      LLDPApplicationType = 5
+	LLDPappTypeVideoConferencing   LLDPApplicationType = 6
+	LLDPappTypeStreamingVideo      LLDPApplicationType = 7
+	LLDPappTypeVideoSignaling      LLDPApplicationType = 8
+)
+
+type LLDPNetworkPolicy struct {
+	ApplicationType LLDPApplicationType
+	Defined         bool
+	Tagged          bool
+	VLANId          uint16
+	L2Priority      uint16
+	DSCPValue       uint8
+}
+
+type LLDPLocationFormat uint8
+
+const (
+	LLDPLocationFormatInvalid    LLDPLocationFormat = 0
+	LLDPLocationFormatCoordinate LLDPLocationFormat = 1
+	LLDPLocationFormatAddress    LLDPLocationFormat = 2
+	LLDPLocationFormatECS        LLDPLocationFormat = 3
+)
+
+type LLDPLocationAddressWhat uint8
+
+const (
+	LLDPLocationAddressWhatDHCP    LLDPLocationAddressWhat = 0
+	LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1
+	LLDPLocationAddressWhatClient  LLDPLocationAddressWhat = 2
+)
+
+type LLDPLocationAddressType uint8
+
+const (
+	LLDPLocationAddressTypeLanguage       LLDPLocationAddressType = 0
+	LLDPLocationAddressTypeNational       LLDPLocationAddressType = 1
+	LLDPLocationAddressTypeCounty         LLDPLocationAddressType = 2
+	LLDPLocationAddressTypeCity           LLDPLocationAddressType = 3
+	LLDPLocationAddressTypeCityDivision   LLDPLocationAddressType = 4
+	LLDPLocationAddressTypeNeighborhood   LLDPLocationAddressType = 5
+	LLDPLocationAddressTypeStreet         LLDPLocationAddressType = 6
+	LLDPLocationAddressTypeLeadingStreet  LLDPLocationAddressType = 16
+	LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17
+	LLDPLocationAddressTypeStreetSuffix   LLDPLocationAddressType = 18
+	LLDPLocationAddressTypeHouseNum       LLDPLocationAddressType = 19
+	LLDPLocationAddressTypeHouseSuffix    LLDPLocationAddressType = 20
+	LLDPLocationAddressTypeLandmark       LLDPLocationAddressType = 21
+	LLDPLocationAddressTypeAdditional     LLDPLocationAddressType = 22
+	LLDPLocationAddressTypeName           LLDPLocationAddressType = 23
+	LLDPLocationAddressTypePostal         LLDPLocationAddressType = 24
+	LLDPLocationAddressTypeBuilding       LLDPLocationAddressType = 25
+	LLDPLocationAddressTypeUnit           LLDPLocationAddressType = 26
+	LLDPLocationAddressTypeFloor          LLDPLocationAddressType = 27
+	LLDPLocationAddressTypeRoom           LLDPLocationAddressType = 28
+	LLDPLocationAddressTypePlace          LLDPLocationAddressType = 29
+	LLDPLocationAddressTypeScript         LLDPLocationAddressType = 128
+)
+
+type LLDPLocationCoordinate struct {
+	LatitudeResolution  uint8
+	Latitude            uint64
+	LongitudeResolution uint8
+	Longitude           uint64
+	AltitudeType        uint8
+	AltitudeResolution  uint16
+	Altitude            uint32
+	Datum               uint8
+}
+
+type LLDPLocationAddressLine struct {
+	Type  LLDPLocationAddressType
+	Value string
+}
+
+type LLDPLocationAddress struct {
+	What         LLDPLocationAddressWhat
+	CountryCode  string
+	AddressLines []LLDPLocationAddressLine
+}
+
+type LLDPLocationECS struct {
+	ELIN string
+}
+
+// LLDPLocation represents a physical location.
+// Only one of the embedded types will contain values, depending on Format.
+type LLDPLocation struct {
+	Format     LLDPLocationFormat
+	Coordinate LLDPLocationCoordinate
+	Address    LLDPLocationAddress
+	ECS        LLDPLocationECS
+}
+
+type LLDPPowerViaMDI struct {
+	Type     LLDPPowerType
+	Source   LLDPPowerSource
+	Priority LLDPPowerPriority
+	Value    uint16
+}
+
+// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs
+type LLDPInfoMedia struct {
+	MediaCapabilities LLDPMediaCapabilities
+	NetworkPolicy     LLDPNetworkPolicy
+	Location          LLDPLocation
+	PowerViaMDI       LLDPPowerViaMDI
+	HardwareRevision  string
+	FirmwareRevision  string
+	SoftwareRevision  string
+	SerialNumber      string
+	Manufacturer      string
+	Model             string
+	AssetID           string
+}
+
+type LLDPCisco2Subtype uint8
+
+// Cisco2 TLV Subtypes
+const (
+	LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1
+)
+
+const (
+	LLDPCiscoPSESupport   uint8 = 1 << 0
+	LLDPCiscoArchShared   uint8 = 1 << 1
+	LLDPCiscoPDSparePair  uint8 = 1 << 2
+	LLDPCiscoPSESparePair uint8 = 1 << 3
+)
+
+// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs
+type LLDPInfoCisco2 struct {
+	PSEFourWirePoESupported       bool
+	PDSparePairArchitectureShared bool
+	PDRequestSparePairPoEOn       bool
+	PSESparePairPoEOn             bool
+}
+
+// Profinet Subtypes
+type LLDPProfinetSubtype uint8
+
+const (
+	LLDPProfinetPNIODelay         LLDPProfinetSubtype = 1
+	LLDPProfinetPNIOPortStatus    LLDPProfinetSubtype = 2
+	LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4
+	LLDPProfinetPNIOChassisMAC    LLDPProfinetSubtype = 5
+	LLDPProfinetPNIOPTCPStatus    LLDPProfinetSubtype = 6
+)
+
+type LLDPPNIODelay struct {
+	RXLocal    uint32
+	RXRemote   uint32
+	TXLocal    uint32
+	TXRemote   uint32
+	CableLocal uint32
+}
+
+type LLDPPNIOPortStatus struct {
+	Class2 uint16
+	Class3 uint16
+}
+
+type LLDPPNIOMRPPortStatus struct {
+	UUID   []byte
+	Status uint16
+}
+
+type LLDPPNIOPTCPStatus struct {
+	MasterAddress     []byte
+	SubdomainUUID     []byte
+	IRDataUUID        []byte
+	PeriodValid       bool
+	PeriodLength      uint32
+	RedPeriodValid    bool
+	RedPeriodBegin    uint32
+	OrangePeriodValid bool
+	OrangePeriodBegin uint32
+	GreenPeriodValid  bool
+	GreenPeriodBegin  uint32
+}
+
+// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs
+type LLDPInfoProfinet struct {
+	PNIODelay         LLDPPNIODelay
+	PNIOPortStatus    LLDPPNIOPortStatus
+	PNIOMRPPortStatus LLDPPNIOMRPPortStatus
+	ChassisMAC        []byte
+	PNIOPTCPStatus    LLDPPNIOPTCPStatus
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscovery.
+func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscovery
+}
+
+// SerializeTo serializes LLDP packet to bytes and writes on SerializeBuffer.
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	chassIDLen := c.ChassisID.serializedLen()
+	portIDLen := c.PortID.serializedLen()
+	vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL
+	if err != nil {
+		return err
+	}
+	copy(vb[:chassIDLen], c.ChassisID.serialize())
+	copy(vb[chassIDLen:], c.PortID.serialize())
+	ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL)
+
+	vb, err = b.AppendBytes(2) // End Tlv, 2 bytes
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(vb[len(vb)-2:], uint16(0)) //End tlv, 2 bytes, all zero
+	return nil
+
+}
+
+func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	var vals []LinkLayerDiscoveryValue
+	vData := data[0:]
+	for len(vData) > 0 {
+		if len(vData) < 2 {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		nbit := vData[0] & 0x01
+		t := LLDPTLVType(vData[0] >> 1)
+		val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])}
+		// Validate the declared length before slicing to avoid reading past
+		// the end of a truncated TLV.
+		if len(vData) < int(2+val.Length) {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		if val.Length > 0 {
+			val.Value = vData[2 : val.Length+2]
+		}
+		vals = append(vals, val)
+		if t == LLDPTLVEnd {
+			break
+		}
+		vData = vData[2+val.Length:]
+	}
+	if len(vals) < 4 {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c := &LinkLayerDiscovery{}
+	gotEnd := false
+	for _, v := range vals {
+		switch v.Type {
+		case LLDPTLVEnd:
+			gotEnd = true
+		case LLDPTLVChassisID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery ChassisID TLV")
+			}
+			c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0])
+			c.ChassisID.ID = v.Value[1:]
+		case LLDPTLVPortID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery PortID TLV")
+			}
+			c.PortID.Subtype = LLDPPortIDSubType(v.Value[0])
+			c.PortID.ID = v.Value[1:]
+		case LLDPTLVTTL:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery TTL TLV")
+			}
+			c.TTL = binary.BigEndian.Uint16(v.Value[0:2])
+		default:
+			c.Values = append(c.Values, v)
+		}
+	}
+	if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c.Contents = data
+	p.AddLayer(c)
+
+	info := &LinkLayerDiscoveryInfo{}
+	p.AddLayer(info)
+	for _, v := range c.Values {
+		switch v.Type {
+		case LLDPTLVPortDescription:
+			info.PortDescription = string(v.Value)
+		case LLDPTLVSysName:
+			info.SysName = string(v.Value)
+		case LLDPTLVSysDescription:
+			info.SysDescription = string(v.Value)
+		case LLDPTLVSysCapabilities:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2]))
+			info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4]))
+		case LLDPTLVMgmtAddress:
+			if err := checkLLDPTLVLen(v, 9); err != nil {
+				return err
+			}
+			mlen := v.Value[0]
+			if err := checkLLDPTLVLen(v, int(mlen+7)); err != nil {
+				return err
+			}
+			info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1])
+			info.MgmtAddress.Address = v.Value[2 : mlen+1]
+			info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1])
+			info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6])
+			olen := v.Value[mlen+6]
+			if err := checkLLDPTLVLen(v, int(mlen+6+olen)); err != nil {
+				return err
+			}
+			info.MgmtAddress.OID = string(v.Value[mlen+9 : mlen+9+olen])
+		case LLDPTLVOrgSpecific:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{IEEEOUI(binary.BigEndian.Uint32(append([]byte{byte(0)}, v.Value[0:3]...))), uint8(v.Value[3]), v.Value[4:]})
+		}
+	}
+	return nil
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021SubtypePortVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.PVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeProtocolVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0)
+			en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0)
+			id := binary.BigEndian.Uint16(o.Info[1:3])
+			info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id})
+		case LLDP8021SubtypeVLANName:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			id := binary.BigEndian.Uint16(o.Info[0:2])
+			info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])})
+		case LLDP8021SubtypeProtocolIdentity:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			l := int(o.Info[0])
+			if l > 0 {
+				info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+l])
+			}
+		case LLDP8021SubtypeVDIUsageDigest:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4])
+		case LLDP8021SubtypeManagementVID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8023 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8023SubtypeMACPHY:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPMACPHYCapability > 0)
+			en := (o.Info[0]&LLDPMACPHYStatus > 0)
+			ca := binary.BigEndian.Uint16(o.Info[1:3])
+			mau := binary.BigEndian.Uint16(o.Info[3:5])
+			info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau}
+		case LLDP8023SubtypeMDIPower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0)
+			info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0)
+			info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0)
+			info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0)
+			info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1])
+			info.PowerViaMDI.PSEClass = uint8(o.Info[2])
+			if len(o.Info) >= 7 {
+				info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6)
+				info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4)
+				if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+					info.PowerViaMDI.Source += 128 // For Stringify purposes
+				}
+				info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f)
+				info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6])
+				info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8])
+			}
+		case LLDP8023SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		case LLDP8023SubtypeMTU:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.MTU = binary.BigEndian.Uint16(o.Info[0:2])
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021Qbg {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021QbgEVB:
+			if err = checkLLDPOrgSpecificLen(o, 9); err != nil {
+				return
+			}
+			info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[0:2]))
+			info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[2:4]))
+			info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(o.Info[4:6])
+			info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(o.Info[6:8])
+			info.EVBSettings.RTEExponent = uint8(o.Info[8])
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIMedia {
+			continue
+		}
+		switch LLDPMediaSubtype(o.SubType) {
+		case LLDPMediaTypeCapabilities:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			b := binary.BigEndian.Uint16(o.Info[0:2])
+			info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0
+			info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0
+			info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0
+			info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0
+			info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0
+			info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0
+			info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2])
+		case LLDPMediaTypeNetwork:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0])
+			b := binary.BigEndian.Uint16(o.Info[1:3])
+			info.NetworkPolicy.Defined = (b & 0x8000) == 0
+			info.NetworkPolicy.Tagged = (b & 0x4000) > 0
+			info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1
+			b = binary.BigEndian.Uint16(o.Info[2:4])
+			info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6
+			info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f)
+		case LLDPMediaTypeLocation:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			info.Location.Format = LLDPLocationFormat(o.Info[0])
+			o.Info = o.Info[1:]
+			switch info.Location.Format {
+			case LLDPLocationFormatCoordinate:
+				if err = checkLLDPOrgSpecificLen(o, 16); err != nil {
+					return
+				}
+				info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2
+				b := binary.BigEndian.Uint64(o.Info[0:8])
+				info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2
+				b = binary.BigEndian.Uint64(o.Info[5:13])
+				info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0x30) >> 4)
+				b1 := binary.BigEndian.Uint16(o.Info[10:12])
+				info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6
+				b2 := binary.BigEndian.Uint32(o.Info[11:15])
+				info.Location.Coordinate.Altitude = b2 & 0x3fffffff
+				info.Location.Coordinate.Datum = uint8(o.Info[15])
+			case LLDPLocationFormatAddress:
+				if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+					return
+				}
+				//ll := uint8(o.Info[0])
+				info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1])
+				info.Location.Address.CountryCode = string(o.Info[2:4])
+				data := o.Info[4:]
+				for len(data) > 1 {
+					aType := LLDPLocationAddressType(data[0])
+					aLen := int(data[1])
+					if len(data) >= aLen+2 {
+						info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])})
+						data = data[aLen+2:]
+					} else {
+						break
+					}
+				}
+			case LLDPLocationFormatECS:
+				info.Location.ECS.ELIN = string(o.Info)
+			}
+		case LLDPMediaTypePower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6)
+			info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4)
+			if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+				info.PowerViaMDI.Source += 128 // For Stringify purposes
+			}
+			info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f)
+			info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // 0 to 102.3 w, 0.1W increments
+		case LLDPMediaTypeHardware:
+			info.HardwareRevision = string(o.Info)
+		case LLDPMediaTypeFirmware:
+			info.FirmwareRevision = string(o.Info)
+		case LLDPMediaTypeSoftware:
+			info.SoftwareRevision = string(o.Info)
+		case LLDPMediaTypeSerial:
+			info.SerialNumber = string(o.Info)
+		case LLDPMediaTypeManufacturer:
+			info.Manufacturer = string(o.Info)
+		case LLDPMediaTypeModel:
+			info.Model = string(o.Info)
+		case LLDPMediaTypeAssetID:
+			info.AssetID = string(o.Info)
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUICisco2 {
+			continue
+		}
+		switch LLDPCisco2Subtype(o.SubType) {
+		case LLDPCisco2PowerViaMDI:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			info.PSEFourWirePoESupported = (o.Info[0] & LLDPCiscoPSESupport) > 0
+			info.PDSparePairArchitectureShared = (o.Info[0] & LLDPCiscoArchShared) > 0
+			info.PDRequestSparePairPoEOn = (o.Info[0] & LLDPCiscoPDSparePair) > 0
+			info.PSESparePairPoEOn = (o.Info[0] & LLDPCiscoPSESparePair) > 0
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIProfinet {
+			continue
+		}
+		switch LLDPProfinetSubtype(o.SubType) {
+		case LLDPProfinetPNIODelay:
+			if err = checkLLDPOrgSpecificLen(o, 20); err != nil {
+				return
+			}
+			info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4])
+			info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8])
+			info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12])
+			info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16])
+			info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20])
+		case LLDPProfinetPNIOPortStatus:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2])
+			info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4])
+		case LLDPProfinetPNIOMRPPortStatus:
+			if err = checkLLDPOrgSpecificLen(o, 18); err != nil {
+				return
+			}
+			info.PNIOMRPPortStatus.UUID = o.Info[0:16]
+			info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18])
+		case LLDPProfinetPNIOChassisMAC:
+			if err = checkLLDPOrgSpecificLen(o, 6); err != nil {
+				return
+			}
+			info.ChassisMAC = o.Info[0:6]
+		case LLDPProfinetPNIOPTCPStatus:
+			if err = checkLLDPOrgSpecificLen(o, 54); err != nil {
+				return
+			}
+			info.PNIOPTCPStatus.MasterAddress = o.Info[0:6]
+			info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22]
+			info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38]
+			b := binary.BigEndian.Uint32(o.Info[38:42])
+			info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[42:46])
+			info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[46:50])
+			info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[50:54])
+			info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff
+		}
+	}
+	return
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo.
+func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscoveryInfo
+}
+
+func getCapabilities(v uint16) (c LLDPCapabilities) {
+	c.Other = (v&LLDPCapsOther > 0)
+	c.Repeater = (v&LLDPCapsRepeater > 0)
+	c.Bridge = (v&LLDPCapsBridge > 0)
+	c.WLANAP = (v&LLDPCapsWLANAP > 0)
+	c.Router = (v&LLDPCapsRouter > 0)
+	c.Phone = (v&LLDPCapsPhone > 0)
+	c.DocSis = (v&LLDPCapsDocSis > 0)
+	c.StationOnly = (v&LLDPCapsStationOnly > 0)
+	c.CVLAN = (v&LLDPCapsCVLAN > 0)
+	c.SVLAN = (v&LLDPCapsSVLAN > 0)
+	c.TMPR = (v&LLDPCapsTmpr > 0)
+	return
+}
+
+func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) {
+	c.StandardBridging = (v & LLDPEVBCapsSTD) > 0
+	c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0
+	c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0
+	c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0
+	c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0
+	return
+}
+
+func (t LLDPTLVType) String() (s string) {
+	switch t {
+	case LLDPTLVEnd:
+		s = "TLV End"
+	case LLDPTLVChassisID:
+		s = "Chassis ID"
+	case LLDPTLVPortID:
+		s = "Port ID"
+	case LLDPTLVTTL:
+		s = "TTL"
+	case LLDPTLVPortDescription:
+		s = "Port Description"
+	case LLDPTLVSysName:
+		s = "System Name"
+	case LLDPTLVSysDescription:
+		s = "System Description"
+	case LLDPTLVSysCapabilities:
+		s = "System Capabilities"
+	case LLDPTLVMgmtAddress:
+		s = "Management Address"
+	case LLDPTLVOrgSpecific:
+		s = "Organisation Specific"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPChassisIDSubType) String() (s string) {
+	switch t {
+	case LLDPChassisIDSubTypeReserved:
+		s = "Reserved"
+	case LLDPChassisIDSubTypeChassisComp:
+		s = "Chassis Component"
+	case LLDPChassisIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPChassisIDSubTypePortComp:
+		s = "Port Component"
+	case LLDPChassisIDSubTypeMACAddr:
+		s = "MAC Address"
+	case LLDPChassisIDSubTypeNetworkAddr:
+		s = "Network Address"
+	case LLDPChassisIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPChassisIDSubTypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPortIDSubType) String() (s string) {
+	switch t {
+	case LLDPPortIDSubtypeReserved:
+		s = "Reserved"
+	case LLDPPortIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPPortIDSubtypePortComp:
+		s = "Port Component"
+	case LLDPPortIDSubtypeMACAddr:
+		s = "MAC Address"
+	case LLDPPortIDSubtypeNetworkAddr:
+		s = "Network Address"
+	case LLDPPortIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPPortIDSubtypeAgentCircuitID:
+		s = "Agent Circuit ID"
+	case LLDPPortIDSubtypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t IANAAddressFamily) String() (s string) {
+	switch t {
+	case IANAAddressFamilyReserved:
+		s = "Reserved"
+	case IANAAddressFamilyIPV4:
+		s = "IPv4"
+	case IANAAddressFamilyIPV6:
+		s = "IPv6"
+	case IANAAddressFamilyNSAP:
+		s = "NSAP"
+	case IANAAddressFamilyHDLC:
+		s = "HDLC"
+	case IANAAddressFamilyBBN1822:
+		s = "BBN 1822"
+	case IANAAddressFamily802:
+		s = "802 media plus Ethernet 'canonical format'"
+	case IANAAddressFamilyE163:
+		s = "E.163"
+	case IANAAddressFamilyE164:
+		s = "E.164 (SMDS, Frame Relay, ATM)"
+	case IANAAddressFamilyF69:
+		s = "F.69 (Telex)"
+	case IANAAddressFamilyX121:
+		s = "X.121, X.25, Frame Relay"
+	case IANAAddressFamilyIPX:
+		s = "IPX"
+	case IANAAddressFamilyAtalk:
+		s = "Appletalk"
+	case IANAAddressFamilyDecnet:
+		s = "Decnet IV"
+	case IANAAddressFamilyBanyan:
+		s = "Banyan Vines"
+	case IANAAddressFamilyE164NSAP:
+		s = "E.164 with NSAP format subaddress"
+	case IANAAddressFamilyDNS:
+		s = "DNS"
+	case IANAAddressFamilyDistname:
+		s = "Distinguished Name"
+	case IANAAddressFamilyASNumber:
+		s = "AS Number"
+	case IANAAddressFamilyXTPIPV4:
+		s = "XTP over IP version 4"
+	case IANAAddressFamilyXTPIPV6:
+		s = "XTP over IP version 6"
+	case IANAAddressFamilyXTP:
+		s = "XTP native mode XTP"
+	case IANAAddressFamilyFcWWPN:
+		s = "Fibre Channel World-Wide Port Name"
+	case IANAAddressFamilyFcWWNN:
+		s = "Fibre Channel World-Wide Node Name"
+	case IANAAddressFamilyGWID:
+		s = "GWID"
+	case IANAAddressFamilyL2VPN:
+		s = "AFI for Layer 2 VPN"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPInterfaceSubtype) String() (s string) {
+	switch t {
+	case LLDPInterfaceSubtypeUnknown:
+		s = "Unknown"
+	case LLDPInterfaceSubtypeifIndex:
+		s = "IfIndex"
+	case LLDPInterfaceSubtypeSysPort:
+		s = "System Port Number"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerType) String() (s string) {
+	switch t {
+	case 0:
+		s = "Type 2 PSE Device"
+	case 1:
+		s = "Type 2 PD Device"
+	case 2:
+		s = "Type 1 PSE Device"
+	case 3:
+		s = "Type 1 PD Device"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerSource) String() (s string) {
+	switch t {
+	// PD Device
+	case 0:
+		s = "Unknown"
+	case 1:
+		s = "PSE"
+	case 2:
+		s = "Local"
+	case 3:
+		s = "PSE and Local"
+	// PSE Device  (Actual value  + 128)
+	case 128:
+		s = "Unknown"
+	case 129:
+		s = "Primary Power Source"
+	case 130:
+		s = "Backup Power Source"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerPriority) String() (s string) {
+	switch t {
+	case 0:
+		s = "Unknown"
+	case 1:
+		s = "Critical"
+	case 2:
+		s = "High"
+	case 3:
+		s = "Low"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPMediaSubtype) String() (s string) {
+	switch t {
+	case LLDPMediaTypeCapabilities:
+		s = "Media Capabilities"
+	case LLDPMediaTypeNetwork:
+		s = "Network Policy"
+	case LLDPMediaTypeLocation:
+		s = "Location Identification"
+	case LLDPMediaTypePower:
+		s = "Extended Power-via-MDI"
+	case LLDPMediaTypeHardware:
+		s = "Hardware Revision"
+	case LLDPMediaTypeFirmware:
+		s = "Firmware Revision"
+	case LLDPMediaTypeSoftware:
+		s = "Software Revision"
+	case LLDPMediaTypeSerial:
+		s = "Serial Number"
+	case LLDPMediaTypeManufacturer:
+		s = "Manufacturer"
+	case LLDPMediaTypeModel:
+		s = "Model"
+	case LLDPMediaTypeAssetID:
+		s = "Asset ID"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPMediaClass) String() (s string) {
+	switch t {
+	case LLDPMediaClassUndefined:
+		s = "Undefined"
+	case LLDPMediaClassEndpointI:
+		s = "Endpoint Class I"
+	case LLDPMediaClassEndpointII:
+		s = "Endpoint Class II"
+	case LLDPMediaClassEndpointIII:
+		s = "Endpoint Class III"
+	case LLDPMediaClassNetwork:
+		s = "Network connectivity"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPApplicationType) String() (s string) {
+	switch t {
+	case LLDPAppTypeReserved:
+		s = "Reserved"
+	case LLDPAppTypeVoice:
+		s = "Voice"
+	case LLDPappTypeVoiceSignaling:
+		s = "Voice Signaling"
+	case LLDPappTypeGuestVoice:
+		s = "Guest Voice"
+	case LLDPappTypeGuestVoiceSignaling:
+		s = "Guest Voice Signaling"
+	case LLDPappTypeSoftphoneVoice:
+		s = "Softphone Voice"
+	case LLDPappTypeVideoConferencing:
+		s = "Video Conferencing"
+	case LLDPappTypeStreamingVideo:
+		s = "Streaming Video"
+	case LLDPappTypeVideoSignaling:
+		s = "Video Signaling"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPLocationFormat) String() (s string) {
+	switch t {
+	case LLDPLocationFormatInvalid:
+		s = "Invalid"
+	case LLDPLocationFormatCoordinate:
+		s = "Coordinate-based LCI"
+	case LLDPLocationFormatAddress:
+		s = "Address-based LCI"
+	case LLDPLocationFormatECS:
+		s = "ECS ELIN"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPLocationAddressType) String() (s string) {
+	switch t {
+	case LLDPLocationAddressTypeLanguage:
+		s = "Language"
+	case LLDPLocationAddressTypeNational:
+		s = "National subdivisions (province, state, etc)"
+	case LLDPLocationAddressTypeCounty:
+		s = "County, parish, district"
+	case LLDPLocationAddressTypeCity:
+		s = "City, township"
+	case LLDPLocationAddressTypeCityDivision:
+		s = "City division, borough, ward"
+	case LLDPLocationAddressTypeNeighborhood:
+		s = "Neighborhood, block"
+	case LLDPLocationAddressTypeStreet:
+		s = "Street"
+	case LLDPLocationAddressTypeLeadingStreet:
+		s = "Leading street direction"
+	case LLDPLocationAddressTypeTrailingStreet:
+		s = "Trailing street suffix"
+	case LLDPLocationAddressTypeStreetSuffix:
+		s = "Street suffix"
+	case LLDPLocationAddressTypeHouseNum:
+		s = "House number"
+	case LLDPLocationAddressTypeHouseSuffix:
+		s = "House number suffix"
+	case LLDPLocationAddressTypeLandmark:
+		s = "Landmark or vanity address"
+	case LLDPLocationAddressTypeAdditional:
+		s = "Additional location information"
+	case LLDPLocationAddressTypeName:
+		s = "Name"
+	case LLDPLocationAddressTypePostal:
+		s = "Postal/ZIP code"
+	case LLDPLocationAddressTypeBuilding:
+		s = "Building"
+	case LLDPLocationAddressTypeUnit:
+		s = "Unit"
+	case LLDPLocationAddressTypeFloor:
+		s = "Floor"
+	case LLDPLocationAddressTypeRoom:
+		s = "Room number"
+	case LLDPLocationAddressTypePlace:
+		s = "Place type"
+	case LLDPLocationAddressTypeScript:
+		s = "Script"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) {
+	if len(v.Value) < l {
+		err = fmt.Errorf("Invalid TLV %v length %d (wanted minimum %v)", v.Type, len(v.Value), l)
+	}
+	return
+}
+
+func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) {
+	if len(o.Info) < l {
+		err = fmt.Errorf("Invalid Org Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l)
+	}
+	return
+}
diff --git a/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/google/gopacket/layers/loopback.go
new file mode 100644
index 0000000..839f760
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/loopback.go
@@ -0,0 +1,80 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// Loopback contains the header for loopback encapsulation.  This header is
+// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL
+// and DLT_LOOP, respectively).
+type Loopback struct {
+	BaseLayer
+	Family ProtocolFamily
+}
+
+// LayerType returns LayerTypeLoopback.
+func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		return errors.New("Loopback packet too small")
+	}
+
+	// The protocol could be either big-endian or little-endian, we're
+	// not sure.  But we're PRETTY sure that the value is less than
+	// 256, so we can check the first two bytes.
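+	// For example, a little-endian host writes protocol family 2 (PF_INET)
+	// as 02 00 00 00, so data[0] != 0 and the little-endian branch is taken;
+	// a big-endian host writes 00 00 00 02 and hits the big-endian branch.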
+	var prot uint32
+	if data[0] == 0 && data[1] == 0 {
+		prot = binary.BigEndian.Uint32(data[:4])
+	} else {
+		prot = binary.LittleEndian.Uint32(data[:4])
+	}
+	if prot > 0xFF {
+		return fmt.Errorf("Invalid loopback protocol %q", data[:4])
+	}
+
+	l.Family = ProtocolFamily(prot)
+	l.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *Loopback) CanDecode() gopacket.LayerClass {
+	return LayerTypeLoopback
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *Loopback) NextLayerType() gopacket.LayerType {
+	return l.Family.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	binary.LittleEndian.PutUint32(bytes, uint32(l.Family))
+	return nil
+}
+
+func decodeLoopback(data []byte, p gopacket.PacketBuilder) error {
+	l := Loopback{}
+	if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+		return err
+	}
+	p.AddLayer(&l)
+	return p.NextDecoder(l.Family)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/google/gopacket/layers/mldv1.go
new file mode 100644
index 0000000..e1bb1dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv1.go
@@ -0,0 +1,182 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+// MLDv1Message represents the common structure of all MLDv1 messages
+type MLDv1Message struct {
+	BaseLayer
+	// 3.4. Maximum Response Delay
+	MaximumResponseDelay time.Duration
+	// 3.6. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for Multicast Listener Query Message V1")
+	}
+
+	m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond
+	// data[2:4] is reserved and not used in mldv1
+	m.MulticastAddress = data[4:20]
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv1Message) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	if m.MaximumResponseDelay < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+	dms := m.MaximumResponseDelay / time.Millisecond
+	if dms > math.MaxUint16 {
+		return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", dms)
+	}
+	binary.BigEndian.PutUint16(buf[0:2], uint16(dms))
+
+	copy(buf[2:4], []byte{0x0, 0x0})
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv1Message) String() string {
+	return fmt.Sprintf(
+		"Maximum Response Delay: %dms, Multicast Address: %s",
+		m.MaximumResponseDelay/time.Millisecond,
+		m.MulticastAddress)
+}
+
+// MLDv1MulticastListenerQueryMessage are sent by the router to determine
+// whether there are multicast listeners on the link.
+// https://tools.ietf.org/html/rfc2710 Page 5
+type MLDv1MulticastListenerQueryMessage struct {
+	MLDv1Message
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	err := m.MLDv1Message.DecodeFromBytes(data, df)
+	if err != nil {
+		return err
+	}
+
+	if len(data) > 20 {
+		m.Payload = data[20:]
+	}
+
+	return nil
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerQuery.
+func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// IsGeneralQuery is true when this is a general query.
+// In a Query message, the Multicast Address field is set to zero when
+// sending a General Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool {
+	return net.IPv6zero.Equal(m.MulticastAddress)
+}
+
+// IsSpecificQuery is true when this is not a general query.
+// In a Query message, the Multicast Address field is set to a specific
+// IPv6 multicast address when sending a Multicast-Address-Specific Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool {
+	return !m.IsGeneralQuery()
+}
+
+// MLDv1MulticastListenerReportMessage is sent by a client listening on
+// a specific multicast address to indicate that it is (still) listening
+// on the specific multicast address.
+// https://tools.ietf.org/html/rfc2710 Page 6
+type MLDv1MulticastListenerReportMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerReport.
+func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases
+// to listen to a multicast address on an interface.
+// https://tools.ietf.org/html/rfc2710 Page 7
+type MLDv1MulticastListenerDoneMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerDone.
+func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerDoneMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/google/gopacket/layers/mldv2.go
new file mode 100644
index 0000000..248cf74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv2.go
@@ -0,0 +1,619 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// S Flag bit is 1
+	mldv2STrue uint8 = 0x8
+
+	// S Flag value mask
+	//   mldv2STrue & mldv2SMask == mldv2STrue  // true
+	//          0x1 & mldv2SMask == mldv2STrue  // true
+	//          0x0 & mldv2SMask == mldv2STrue  // false
+	mldv2SMask uint8 = 0x8
+
+	// QRV value mask
+	mldv2QRVMask uint8 = 0x7
+)
+
+// MLDv2MulticastListenerQueryMessage are sent by multicast routers to query the
+// multicast listening state of neighboring interfaces.
+// https://tools.ietf.org/html/rfc3810#section-5.1
+//
+// Some information, like the Maximum Response Code and Multicast Address, is in the
+// previous layer, LayerTypeMLDv1MulticastListenerQuery.
+type MLDv2MulticastListenerQueryMessage struct {
+	BaseLayer
+	// 5.1.3. Maximum Response Code
+	MaximumResponseCode uint16
+	// 5.1.5. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+	// 5.1.7. S Flag (Suppress Router-Side Processing)
+	SuppressRoutersideProcessing bool
+	// 5.1.8. QRV (Querier's Robustness Variable)
+	QueriersRobustnessVariable uint8
+	// 5.1.9. QQIC (Querier's Query Interval Code)
+	QueriersQueryIntervalCode uint8
+	// 5.1.10. Number of Sources (N)
+	NumberOfSources uint16
+	// 5.1.11 Source Address [i]
+	SourceAddresses []net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 24 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2")
+	}
+
+	m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2])
+	// ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4
+	m.MulticastAddress = data[4:20]
+	m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue
+	m.QueriersRobustnessVariable = data[20] & mldv2QRVMask
+	m.QueriersQueryIntervalCode = data[21]
+
+	m.NumberOfSources = binary.BigEndian.Uint16(data[22:24])
+
+	var end int
+	for i := uint16(0); i < m.NumberOfSources; i++ {
+		begin := 24 + (int(i) * 16)
+		end = begin + 16
+
+		if end > len(data) {
+			df.SetTruncated()
+			return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(24)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode)
+	copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	byte20 := m.QueriersRobustnessVariable & mldv2QRVMask
+	if m.SuppressRoutersideProcessing {
+		byte20 |= mldv2STrue
+	} else {
+		byte20 &= ^mldv2STrue // the complement of mldv2STrue
+	}
+	byte20 &= 0x0F // set reserved bits to zero
+	buf[20] = byte20
+
+	binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources)
+	buf[21] = m.QueriersQueryIntervalCode
+
+	return nil
+}
+
+// writes each source address to the buffer preserving the order
+func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	numberOfSourceAddresses := len(m.SourceAddresses)
+	if numberOfSourceAddresses > math.MaxUint16 {
+		return fmt.Errorf(
+			"there are more than %d source addresses, but 65535 is the maximum number of supported addresses",
+			numberOfSourceAddresses)
+	}
+
+	if opts.FixLengths {
+		m.NumberOfSources = uint16(numberOfSourceAddresses)
+	}
+
+	lastSAIdx := numberOfSourceAddresses - 1
+	for k := range m.SourceAddresses {
+		i := lastSAIdx - k // reverse order
+
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		sa16 := m.SourceAddresses[i].To16()
+		if sa16 == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf[0:16], sa16)
+	}
+
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerQueryMessage) String() string {
+	return fmt.Sprintf(
+		"Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Address: %d (actual: %d), Source Addresses: %s",
+		m.MaximumResponseCode,
+		m.MaximumResponseDelay()/time.Millisecond,
+		m.MulticastAddress,
+		m.SuppressRoutersideProcessing,
+		m.QueriersRobustnessVariable,
+		m.QueriersQueryIntervalCode,
+		m.QQI()/time.Second,
+		m.NumberOfSources,
+		len(m.SourceAddresses),
+		m.SourceAddresses)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerQuery.
+func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// QQI calculates the Querier's Query Interval based on the QQIC
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
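+//
+// For example, QQIC 0x8F has exp=0 and mant=0xF, which decodes to
+// (0xF|0x10)<<(0+3) = 248 seconds.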
+func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration {
+	data := m.QueriersQueryIntervalCode
+	if data < 128 {
+		return time.Second * time.Duration(data)
+	}
+
+	exp := uint16(data) & 0x70 >> 4
+	mant := uint16(data) & 0x0F
+	return time.Second * time.Duration((mant|0x10)<<(exp+3))
+}
+
+// SetQQI calculates and updates the Querier's Query Interval Code (QQIC)
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error {
+	if d < 0 {
+		m.QueriersQueryIntervalCode = 0
+		return errors.New("QQI duration is negative")
+	}
+
+	if d == 0 {
+		m.QueriersQueryIntervalCode = 0
+		return nil
+	}
+
+	dms := d / time.Second
+	if dms < 128 {
+		m.QueriersQueryIntervalCode = uint8(dms)
+		return nil
+	}
+
+	if dms > 31744 { // mant=0xF, exp=0x7
+		m.QueriersQueryIntervalCode = 0xFF
+		return fmt.Errorf("QQI duration %ds is too big, the maximum allowed is 31744s", dms)
+	}
+
+	value := uint16(dms) // ok, because 31744 < math.MaxUint16
+	exp := uint8(7)
+	for mask := uint16(0x4000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint8(0x000F & (value >> (exp + 3)))
+	sig := uint8(0x80)
+	m.QueriersQueryIntervalCode = sig | exp<<4 | mant
+
+	return nil
+}
+
+// MaximumResponseDelay returns the Maximum Response Delay based on the
+// Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration {
+	if m.MaximumResponseCode < 0x8000 {
+		return time.Millisecond * time.Duration(m.MaximumResponseCode)
+	}
+
+	exp := m.MaximumResponseCode & 0x7000 >> 12
+	mant := m.MaximumResponseCode & 0x0FFF
+
+	return time.Millisecond * (time.Duration(mant|0x1000) << (exp + 3))
+}
+
+// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error {
+	if d == 0 {
+		m.MaximumResponseCode = 0
+		return nil
+	}
+
+	if d < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+
+	dms := d / time.Millisecond
+
+	if dms < 32768 {
+		m.MaximumResponseCode = uint16(dms)
+		return nil
+	}
+
+	if dms > 4193280 { // mant=0xFFF, exp=0x7
+		return fmt.Errorf("maximum response delay %dms is bigger than the maximum of 4193280ms", dms)
+	}
+
+	value := uint32(dms) // ok, because 4193280 < math.MaxUint32
+	exp := uint8(7)
+	for mask := uint32(0x00400000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint16(0x00000FFF & (value >> (exp + 3)))
+	sig := uint16(0x8000)
+	m.MaximumResponseCode = sig | uint16(exp)<<12 | mant
+	return nil
+}
+
+// MLDv2MulticastListenerReportMessage is sent by an IP node to report the
+// current multicast listening state, or changes therein.
+// https://tools.ietf.org/html/rfc3810#section-5.2
+type MLDv2MulticastListenerReportMessage struct {
+	BaseLayer
+	// 5.2.3. Nr of Mcast Address Records
+	NumberOfMulticastAddressRecords uint16
+	// 5.2.4. Multicast Address Record [i]
+	MulticastAddressRecords []MLDv2MulticastAddressRecord
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2")
+	}
+
+	// ignore data[0:2] as per RFC
+	// https://tools.ietf.org/html/rfc3810#section-5.2.1
+	m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4])
+
+	begin := 4
+	for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ {
+		mar := MLDv2MulticastAddressRecord{}
+		read, err := mar.decode(data[begin:], df)
+		if err != nil {
+			return err
+		}
+
+		m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar)
+
+		begin += read
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	lastItemIdx := len(m.MulticastAddressRecords) - 1
+	for k := range m.MulticastAddressRecords {
+		i := lastItemIdx - k // reverse order
+
+		err := m.MulticastAddressRecords[i].serializeTo(b, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	if opts.FixLengths {
+		numberOfMAR := len(m.MulticastAddressRecords)
+		if numberOfMAR > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d multicast address records added, but the maximum is 65535",
+				numberOfMAR)
+		}
+
+		m.NumberOfMulticastAddressRecords = uint16(numberOfMAR)
+	}
+
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	copy(buf[0:2], []byte{0x0, 0x0})
+	binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords)
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerReportMessage) String() string {
+	return fmt.Sprintf(
+		"Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v",
+		m.NumberOfMulticastAddressRecords,
+		len(m.MulticastAddressRecords),
+		m.MulticastAddressRecords)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerReport.
+func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// MLDv2MulticastAddressRecordType holds the type of a
+// Multicast Address Record, according to
+// https://tools.ietf.org/html/rfc3810#section-5.2.5 and
+// https://tools.ietf.org/html/rfc3810#section-5.2.12
+type MLDv2MulticastAddressRecordType uint8
+
+const (
+	// MLDv2MulticastAddressRecordTypeModeIsIncluded stands for
+	// MODE_IS_INCLUDE - indicates that the interface has a filter
+	// mode of INCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1
+	// MLDv2MulticastAddressRecordTypeModeIsExcluded stands for
+	// MODE_IS_EXCLUDE - indicates that the interface has a filter
+	// mode of EXCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2
+	// MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for
+	// CHANGE_TO_INCLUDE_MODE - indicates that the interface has
+	// changed to INCLUDE filter mode for the specified multicast
+	// address.
+	MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3
+	// MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for
+	// CHANGE_TO_EXCLUDE_MODE - indicates that the interface has
+	// changed to EXCLUDE filter mode for the specified multicast
+	// address
+	MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4
+	// MLDv2MulticastAddressRecordTypeAllowNewSources stands for
+	// ALLOW_NEW_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the additional sources that the node wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5
+	// MLDv2MulticastAddressRecordTypeBlockOldSources stands for
+	// BLOCK_OLD_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the sources that the node no longer wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6
+)
+
+// Human readable record types
+// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12
+func (m MLDv2MulticastAddressRecordType) String() string {
+	switch m {
+	case MLDv2MulticastAddressRecordTypeModeIsIncluded:
+		return "MODE_IS_INCLUDE"
+	case MLDv2MulticastAddressRecordTypeModeIsExcluded:
+		return "MODE_IS_EXCLUDE"
+	case MLDv2MulticastAddressRecordTypeChangeToIncludeMode:
+		return "CHANGE_TO_INCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeChangeToExcludeMode:
+		return "CHANGE_TO_EXCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeAllowNewSources:
+		return "ALLOW_NEW_SOURCES"
+	case MLDv2MulticastAddressRecordTypeBlockOldSources:
+		return "BLOCK_OLD_SOURCES"
+	default:
+		return fmt.Sprintf("UNKNOWN(%d)", m)
+	}
+}
+
+// MLDv2MulticastAddressRecord contains information on the sender listening to a
+// single multicast address on the interface from which the report is sent.
+// https://tools.ietf.org/html/rfc3810#section-5.2.4
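+//
+// A record is RecordType (1 byte), AuxDataLen (1 byte, counting 32-bit words),
+// N (2 bytes), the 16-byte multicast address, N*16 bytes of source addresses
+// and AuxDataLen*4 bytes of auxiliary data; for example, a record with two
+// sources and AuxDataLen=1 occupies 20 + 32 + 4 = 56 bytes.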
+type MLDv2MulticastAddressRecord struct {
+	// 5.2.5. Record Type
+	RecordType MLDv2MulticastAddressRecordType
+	// 5.2.6. Auxiliary Data Length (number of 32-bit words)
+	AuxDataLen uint8
+	// 5.2.7. Number Of Sources (N)
+	N uint16
+	// 5.2.8. Multicast Address
+	MulticastAddress net.IP
+	// 5.2.9 Source Address [i]
+	SourceAddresses []net.IP
+	// 5.2.10 Auxiliary Data
+	AuxiliaryData []byte
+}
+
+// decodes a multicast address record from bytes
+func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return 0, errors.New(
+			"Multicast Listener Report Message V2 layer less than 4 bytes for Multicast Address Record")
+	}
+
+	m.RecordType = MLDv2MulticastAddressRecordType(data[0])
+	m.AuxDataLen = data[1]
+	m.N = binary.BigEndian.Uint16(data[2:4])
+	m.MulticastAddress = data[4:20]
+
+	for i := uint16(0); i < m.N; i++ {
+		begin := 20 + (int(i) * 16)
+		end := begin + 16
+
+		if len(data) < end {
+			df.SetTruncated()
+			return begin, fmt.Errorf(
+				"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	expectedLengthWithoutAuxData := 20 + (int(m.N) * 16)
+	expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithoutAuxData // *4 because AuxDataLen is in 32-bit words
+	if len(data) < expectedTotalLength {
+		return expectedLengthWithoutAuxData, fmt.Errorf(
+			"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record",
+			expectedTotalLength)
+	}
+
+	m.AuxiliaryData = data[expectedLengthWithoutAuxData:expectedTotalLength]
+
+	return expectedTotalLength, nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastAddressRecord) String() string {
+	return fmt.Sprintf(
+		"RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x",
+		m.RecordType,
+		m.RecordType.String(),
+		m.AuxDataLen,
+		m.N,
+		m.MulticastAddress.To16(),
+		m.SourceAddresses,
+		m.AuxiliaryData)
+}
+
+// serializes a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeAuxiliaryDataTo(b, opts); err != nil {
+		return err
+	}
+
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = uint8(m.RecordType)
+	buf[1] = m.AuxDataLen
+	binary.BigEndian.PutUint16(buf[2:4], m.N)
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	return nil
+}
+
+// serializes the auxiliary data of a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if remainder := len(m.AuxiliaryData) % 4; remainder != 0 {
+		zeroWord := []byte{0x0, 0x0, 0x0, 0x0}
+		m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:4-remainder]...)
+	}
+
+	if opts.FixLengths {
+		auxDataLen := len(m.AuxiliaryData) / 4
+
+		if auxDataLen > math.MaxUint8 {
+			return fmt.Errorf("auxiliary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen)
+		}
+
+		m.AuxDataLen = uint8(auxDataLen)
+	}
+
+	buf, err := b.PrependBytes(len(m.AuxiliaryData))
+	if err != nil {
+		return err
+	}
+
+	copy(buf, m.AuxiliaryData)
+	return nil
+}
+
+// serializes the source addresses of a multicast address record preserving the order
+func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		numberOfSourceAddresses := len(m.SourceAddresses)
+
+		if numberOfSourceAddresses > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d source addresses added, but the maximum is 65535",
+				numberOfSourceAddresses)
+		}
+
+		m.N = uint16(numberOfSourceAddresses)
+	}
+
+	lastItemIdx := len(m.SourceAddresses) - 1
+	for k := range m.SourceAddresses {
+		i := lastItemIdx - k // reverse order
+
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		sa16 := m.SourceAddresses[i].To16()
+		if sa16 == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf, sa16)
+	}
+
+	return nil
+}
+
+func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/google/gopacket/layers/modbustcp.go
new file mode 100644
index 0000000..bafbd74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/modbustcp.go
@@ -0,0 +1,150 @@
+// Copyright 2018, The GoPacket Authors, All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// ModbusTCP Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for ModbusTCP.
+//
+//******************************************************************************
+
+const mbapRecordSizeInBytes int = 7
+const modbusPDUMinimumRecordSizeInBytes int = 2
+const modbusPDUMaximumRecordSizeInBytes int = 253
+
+// ModbusProtocol type
+type ModbusProtocol uint16
+
+// ModbusProtocol known values.
+const (
+	ModbusProtocolModbus ModbusProtocol = 0
+)
+
+func (mp ModbusProtocol) String() string {
+	switch mp {
+	default:
+		return "Unknown"
+	case ModbusProtocolModbus:
+		return "Modbus"
+	}
+}
+
+//******************************************************************************
+
+// ModbusTCP Type
+// --------
+// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object
+// represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP
+// payload of a ModbusTCP packet.
+//
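+// For example, the 7-byte MBAP header 00 01 00 00 00 06 11 decodes to
+// TransactionIdentifier=1, ProtocolIdentifier=Modbus (0), Length=6 (the
+// UnitIdentifier plus a 5-byte PDU) and UnitIdentifier=0x11.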
+type ModbusTCP struct {
+	BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes.
+
+	TransactionIdentifier uint16         // Identification of a MODBUS Request/Response transaction
+	ProtocolIdentifier    ModbusProtocol // It is used for intra-system multiplexing
+	Length                uint16         // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length)
+	UnitIdentifier        uint8          // Identification of a remote slave connected on a serial line or on other buses
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.
+func (d *ModbusTCP) LayerType() gopacket.LayerType {
+	return LayerTypeModbusTCP
+}
+
+//******************************************************************************
+
+// decodeModbusTCP analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the ModbusTCP layer.
+func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &ModbusTCP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return p.NextDecoder(d.NextLayerType())
+
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// Upon success, it loads the ModbusTCP object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a MBAP record, then return an error.
+	if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too short")
+	}
+
+	if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too long")
+	}
+
+	// ModbusTCP type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level (MPBA).
+	//    Payload is supposed to contain the payload of this level (PDU).
+	d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// The fields can just be copied in big endian order.
+	d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])
+	d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))
+	d.Length = binary.BigEndian.Uint16(data[4:6])
+
+	// Length should have the size of the payload plus one byte (size of UnitIdentifier)
+	if d.Length != uint16(len(d.BaseLayer.Payload)+1) {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet with wrong field value (Length)")
+	}
+	d.UnitIdentifier = uint8(data[6])
+
+	return nil
+}
+
+//******************************************************************************
+
+// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+//******************************************************************************
+
+// Payload returns the Modbus Protocol Data Unit (PDU), composed of the Function Code and Data; it is carried within ModbusTCP packets.
+func (d *ModbusTCP) Payload() []byte {
+	return d.BaseLayer.Payload
+}
diff --git a/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/google/gopacket/layers/mpls.go
new file mode 100644
index 0000000..83079a0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mpls.go
@@ -0,0 +1,87 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// MPLS is the MPLS packet header.
+type MPLS struct {
+	BaseLayer
+	Label        uint32
+	TrafficClass uint8
+	StackBottom  bool
+	TTL          uint8
+}
+
+// LayerType returns gopacket.LayerTypeMPLS.
+func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS }
+
+// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's
+// given, then decode the packet accordingly.  Its algorithm for guessing is:
+//  If the packet starts with byte 0x45-0x4F: IPv4
+//  If the packet starts with byte 0x60-0x6F: IPv6
+//  Otherwise:  Error
+// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach.
+type ProtocolGuessingDecoder struct{}
+
+func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error {
+	switch data[0] {
+	// 0x40 | header_len, where header_len is at least 5.
+	case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f:
+		return decodeIPv4(data, p)
+		// IPv6 can start with any byte whose first 4 bits are 0x6.
+	case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f:
+		return decodeIPv6(data, p)
+	}
+	return errors.New("Unable to guess protocol of packet data")
+}
+
+// MPLSPayloadDecoder is the decoder used to decode data encapsulated by each MPLS
+// layer.  MPLS contains no type information, so we have to explicitly decide
+// which decoder to use.  This is initially set to ProtocolGuessingDecoder, our
+// simple attempt at guessing protocols based on the first few bytes of data
+// available to us.  However, if you know that in your environment MPLS always
+// encapsulates a specific protocol, you may reset this.
+var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{}
+
+func decodeMPLS(data []byte, p gopacket.PacketBuilder) error {
+	decoded := binary.BigEndian.Uint32(data[:4])
+	mpls := &MPLS{
+		Label:        decoded >> 12,
+		TrafficClass: uint8(decoded>>9) & 0x7,
+		StackBottom:  decoded&0x100 != 0,
+		TTL:          uint8(decoded),
+		BaseLayer:    BaseLayer{data[:4], data[4:]},
+	}
+	p.AddLayer(mpls)
+	if mpls.StackBottom {
+		return p.NextDecoder(MPLSPayloadDecoder)
+	}
+	return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS))
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	encoded := m.Label << 12
+	encoded |= uint32(m.TrafficClass) << 9
+	encoded |= uint32(m.TTL)
+	if m.StackBottom {
+		encoded |= 0x100
+	}
+	binary.BigEndian.PutUint32(bytes, encoded)
+	return nil
+}
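The MPLSPayloadDecoder variable above is the hook for environments where the encapsulated protocol is known in advance. A minimal sketch, assuming MPLS always carries IPv4 in the environment in question (gopacket.LayerType itself satisfies gopacket.Decoder, so a layer type can be assigned directly):

package main

import (
	"github.com/google/gopacket/layers"
)

func main() {
	// Replace the default ProtocolGuessingDecoder: decode every MPLS payload
	// as IPv4 instead of guessing from the first byte.
	layers.MPLSPayloadDecoder = layers.LayerTypeIPv4
}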
diff --git a/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/google/gopacket/layers/ndp.go
new file mode 100644
index 0000000..f7ca1b2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ndp.go
@@ -0,0 +1,611 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c
+
+package layers
+
+import (
+	"fmt"
+	"github.com/google/gopacket"
+	"net"
+)
+
+type NDPChassisType uint8
+
+// Nortel Chassis Types
+const (
+	NDPChassisother                                       NDPChassisType = 1
+	NDPChassis3000                                        NDPChassisType = 2
+	NDPChassis3030                                        NDPChassisType = 3
+	NDPChassis2310                                        NDPChassisType = 4
+	NDPChassis2810                                        NDPChassisType = 5
+	NDPChassis2912                                        NDPChassisType = 6
+	NDPChassis2914                                        NDPChassisType = 7
+	NDPChassis271x                                        NDPChassisType = 8
+	NDPChassis2813                                        NDPChassisType = 9
+	NDPChassis2814                                        NDPChassisType = 10
+	NDPChassis2915                                        NDPChassisType = 11
+	NDPChassis5000                                        NDPChassisType = 12
+	NDPChassis2813SA                                      NDPChassisType = 13
+	NDPChassis2814SA                                      NDPChassisType = 14
+	NDPChassis810M                                        NDPChassisType = 15
+	NDPChassisEthercell                                   NDPChassisType = 16
+	NDPChassis5005                                        NDPChassisType = 17
+	NDPChassisAlcatelEWC                                  NDPChassisType = 18
+	NDPChassis2715SA                                      NDPChassisType = 20
+	NDPChassis2486                                        NDPChassisType = 21
+	NDPChassis28000series                                 NDPChassisType = 22
+	NDPChassis23000series                                 NDPChassisType = 23
+	NDPChassis5DN00xseries                                NDPChassisType = 24
+	NDPChassisBayStackEthernet                            NDPChassisType = 25
+	NDPChassis23100series                                 NDPChassisType = 26
+	NDPChassis100BaseTHub                                 NDPChassisType = 27
+	NDPChassis3000FastEthernet                            NDPChassisType = 28
+	NDPChassisOrionSwitch                                 NDPChassisType = 29
+	NDPChassisDDS                                         NDPChassisType = 31
+	NDPChassisCentillion6slot                             NDPChassisType = 32
+	NDPChassisCentillion12slot                            NDPChassisType = 33
+	NDPChassisCentillion1slot                             NDPChassisType = 34
+	NDPChassisBayStack301                                 NDPChassisType = 35
+	NDPChassisBayStackTokenRingHub                        NDPChassisType = 36
+	NDPChassisFVCMultimediaSwitch                         NDPChassisType = 37
+	NDPChassisSwitchNode                                  NDPChassisType = 38
+	NDPChassisBayStack302Switch                           NDPChassisType = 39
+	NDPChassisBayStack350Switch                           NDPChassisType = 40
+	NDPChassisBayStack150EthernetHub                      NDPChassisType = 41
+	NDPChassisCentillion50NSwitch                         NDPChassisType = 42
+	NDPChassisCentillion50TSwitch                         NDPChassisType = 43
+	NDPChassisBayStack303304Switches                      NDPChassisType = 44
+	NDPChassisBayStack200EthernetHub                      NDPChassisType = 45
+	NDPChassisBayStack25010100EthernetHub                 NDPChassisType = 46
+	NDPChassisBayStack450101001000Switches                NDPChassisType = 48
+	NDPChassisBayStack41010100Switches                    NDPChassisType = 49
+	NDPChassisPassport1200L3Switch                        NDPChassisType = 50
+	NDPChassisPassport1250L3Switch                        NDPChassisType = 51
+	NDPChassisPassport1100L3Switch                        NDPChassisType = 52
+	NDPChassisPassport1150L3Switch                        NDPChassisType = 53
+	NDPChassisPassport1050L3Switch                        NDPChassisType = 54
+	NDPChassisPassport1051L3Switch                        NDPChassisType = 55
+	NDPChassisPassport8610L3Switch                        NDPChassisType = 56
+	NDPChassisPassport8606L3Switch                        NDPChassisType = 57
+	NDPChassisPassport8010                                NDPChassisType = 58
+	NDPChassisPassport8006                                NDPChassisType = 59
+	NDPChassisBayStack670wirelessaccesspoint              NDPChassisType = 60
+	NDPChassisPassport740                                 NDPChassisType = 61
+	NDPChassisPassport750                                 NDPChassisType = 62
+	NDPChassisPassport790                                 NDPChassisType = 63
+	NDPChassisBusinessPolicySwitch200010100Switches       NDPChassisType = 64
+	NDPChassisPassport8110L2Switch                        NDPChassisType = 65
+	NDPChassisPassport8106L2Switch                        NDPChassisType = 66
+	NDPChassisBayStack3580GigSwitch                       NDPChassisType = 67
+	NDPChassisBayStack10PowerSupplyUnit                   NDPChassisType = 68
+	NDPChassisBayStack42010100Switch                      NDPChassisType = 69
+	NDPChassisOPTeraMetro1200EthernetServiceModule        NDPChassisType = 70
+	NDPChassisOPTera8010co                                NDPChassisType = 71
+	NDPChassisOPTera8610coL3Switch                        NDPChassisType = 72
+	NDPChassisOPTera8110coL2Switch                        NDPChassisType = 73
+	NDPChassisOPTera8003                                  NDPChassisType = 74
+	NDPChassisOPTera8603L3Switch                          NDPChassisType = 75
+	NDPChassisOPTera8103L2Switch                          NDPChassisType = 76
+	NDPChassisBayStack380101001000Switch                  NDPChassisType = 77
+	NDPChassisEthernetSwitch47048T                        NDPChassisType = 78
+	NDPChassisOPTeraMetro1450EthernetServiceModule        NDPChassisType = 79
+	NDPChassisOPTeraMetro1400EthernetServiceModule        NDPChassisType = 80
+	NDPChassisAlteonSwitchFamily                          NDPChassisType = 81
+	NDPChassisEthernetSwitch46024TPWR                     NDPChassisType = 82
+	NDPChassisOPTeraMetro8010OPML2Switch                  NDPChassisType = 83
+	NDPChassisOPTeraMetro8010coOPML2Switch                NDPChassisType = 84
+	NDPChassisOPTeraMetro8006OPML2Switch                  NDPChassisType = 85
+	NDPChassisOPTeraMetro8003OPML2Switch                  NDPChassisType = 86
+	NDPChassisAlteon180e                                  NDPChassisType = 87
+	NDPChassisAlteonAD3                                   NDPChassisType = 88
+	NDPChassisAlteon184                                   NDPChassisType = 89
+	NDPChassisAlteonAD4                                   NDPChassisType = 90
+	NDPChassisPassport1424L3Switch                        NDPChassisType = 91
+	NDPChassisPassport1648L3Switch                        NDPChassisType = 92
+	NDPChassisPassport1612L3Switch                        NDPChassisType = 93
+	NDPChassisPassport1624L3Switch                        NDPChassisType = 94
+	NDPChassisBayStack38024FFiber1000Switch               NDPChassisType = 95
+	NDPChassisEthernetRoutingSwitch551024T                NDPChassisType = 96
+	NDPChassisEthernetRoutingSwitch551048T                NDPChassisType = 97
+	NDPChassisEthernetSwitch47024T                        NDPChassisType = 98
+	NDPChassisNortelNetworksWirelessLANAccessPoint2220    NDPChassisType = 99
+	NDPChassisPassportRBS2402L3Switch                     NDPChassisType = 100
+	NDPChassisAlteonApplicationSwitch2424                 NDPChassisType = 101
+	NDPChassisAlteonApplicationSwitch2224                 NDPChassisType = 102
+	NDPChassisAlteonApplicationSwitch2208                 NDPChassisType = 103
+	NDPChassisAlteonApplicationSwitch2216                 NDPChassisType = 104
+	NDPChassisAlteonApplicationSwitch3408                 NDPChassisType = 105
+	NDPChassisAlteonApplicationSwitch3416                 NDPChassisType = 106
+	NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107
+	NDPChassisEthernetSwitch42548T                        NDPChassisType = 108
+	NDPChassisEthernetSwitch42524T                        NDPChassisType = 109
+	NDPChassisNortelNetworksWirelessLANAccessPoint2221    NDPChassisType = 110
+	NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch  NDPChassisType = 111
+	NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112
+	NDPChassisPassport830010slotchassis                   NDPChassisType = 113
+	NDPChassisPassport83006slotchassis                    NDPChassisType = 114
+	NDPChassisEthernetRoutingSwitch552024TPWR             NDPChassisType = 115
+	NDPChassisEthernetRoutingSwitch552048TPWR             NDPChassisType = 116
+	NDPChassisNortelNetworksVPNGateway3050                NDPChassisType = 117
+	NDPChassisAlteonSSL31010100                           NDPChassisType = 118
+	NDPChassisAlteonSSL31010100Fiber                      NDPChassisType = 119
+	NDPChassisAlteonSSL31010100FIPS                       NDPChassisType = 120
+	NDPChassisAlteonSSL410101001000                       NDPChassisType = 121
+	NDPChassisAlteonSSL410101001000Fiber                  NDPChassisType = 122
+	NDPChassisAlteonApplicationSwitch2424SSL              NDPChassisType = 123
+	NDPChassisEthernetSwitch32524T                        NDPChassisType = 124
+	NDPChassisEthernetSwitch32524G                        NDPChassisType = 125
+	NDPChassisNortelNetworksWirelessLANAccessPoint2225    NDPChassisType = 126
+	NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127
+	NDPChassis24portEthernetSwitch47024TPWR               NDPChassisType = 128
+	NDPChassis48portEthernetSwitch47048TPWR               NDPChassisType = 129
+	NDPChassisEthernetRoutingSwitch553024TFD              NDPChassisType = 130
+	NDPChassisEthernetSwitch351024T                       NDPChassisType = 131
+	NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132
+	NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133
+	NDPChassisNortelSecureAccessSwitch                    NDPChassisType = 134
+	NDPChassisNortelNetworksVPNGateway3070                NDPChassisType = 135
+	NDPChassisOPTeraMetro3500                             NDPChassisType = 136
+	NDPChassisSMBBES101024T                               NDPChassisType = 137
+	NDPChassisSMBBES101048T                               NDPChassisType = 138
+	NDPChassisSMBBES102024TPWR                            NDPChassisType = 139
+	NDPChassisSMBBES102048TPWR                            NDPChassisType = 140
+	NDPChassisSMBBES201024T                               NDPChassisType = 141
+	NDPChassisSMBBES201048T                               NDPChassisType = 142
+	NDPChassisSMBBES202024TPWR                            NDPChassisType = 143
+	NDPChassisSMBBES202048TPWR                            NDPChassisType = 144
+	NDPChassisSMBBES11024T                                NDPChassisType = 145
+	NDPChassisSMBBES11048T                                NDPChassisType = 146
+	NDPChassisSMBBES12024TPWR                             NDPChassisType = 147
+	NDPChassisSMBBES12048TPWR                             NDPChassisType = 148
+	NDPChassisSMBBES21024T                                NDPChassisType = 149
+	NDPChassisSMBBES21048T                                NDPChassisType = 150
+	NDPChassisSMBBES22024TPWR                             NDPChassisType = 151
+	NDPChassisSMBBES22048TPWR                             NDPChassisType = 152
+	NDPChassisOME6500                                     NDPChassisType = 153
+	NDPChassisEthernetRoutingSwitch4548GT                 NDPChassisType = 154
+	NDPChassisEthernetRoutingSwitch4548GTPWR              NDPChassisType = 155
+	NDPChassisEthernetRoutingSwitch4550T                  NDPChassisType = 156
+	NDPChassisEthernetRoutingSwitch4550TPWR               NDPChassisType = 157
+	NDPChassisEthernetRoutingSwitch4526FX                 NDPChassisType = 158
+	NDPChassisEthernetRoutingSwitch250026T                NDPChassisType = 159
+	NDPChassisEthernetRoutingSwitch250026TPWR             NDPChassisType = 160
+	NDPChassisEthernetRoutingSwitch250050T                NDPChassisType = 161
+	NDPChassisEthernetRoutingSwitch250050TPWR             NDPChassisType = 162
+)
+
+type NDPBackplaneType uint8
+
+// Nortel Backplane Types
+const (
+	NDPBackplaneOther                                       NDPBackplaneType = 1
+	NDPBackplaneEthernet                                    NDPBackplaneType = 2
+	NDPBackplaneEthernetTokenring                           NDPBackplaneType = 3
+	NDPBackplaneEthernetFDDI                                NDPBackplaneType = 4
+	NDPBackplaneEthernetTokenringFDDI                       NDPBackplaneType = 5
+	NDPBackplaneEthernetTokenringRedundantPower             NDPBackplaneType = 6
+	NDPBackplaneEthernetTokenringFDDIRedundantPower         NDPBackplaneType = 7
+	NDPBackplaneTokenRing                                   NDPBackplaneType = 8
+	NDPBackplaneEthernetTokenringFastEthernet               NDPBackplaneType = 9
+	NDPBackplaneEthernetFastEthernet                        NDPBackplaneType = 10
+	NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11
+	NDPBackplaneEthernetFastEthernetGigabitEthernet         NDPBackplaneType = 12
+)
+
+type NDPState uint8
+
+// Device State
+const (
+	NDPStateTopology  NDPState = 1
+	NDPStateHeartbeat NDPState = 2
+	NDPStateNew       NDPState = 3
+)
+
+// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol.
+type NortelDiscovery struct {
+	BaseLayer
+	IPAddress net.IP
+	SegmentID []byte
+	Chassis   NDPChassisType
+	Backplane NDPBackplaneType
+	State     NDPState
+	NumLinks  uint8
+}
+
+// LayerType returns gopacket.LayerTypeNortelDiscovery.
+func (c *NortelDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeNortelDiscovery
+}
+
+func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	c := &NortelDiscovery{}
+	if len(data) < 11 {
+		return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data))
+	}
+	c.IPAddress = data[0:4]
+	c.SegmentID = data[4:7]
+	c.Chassis = NDPChassisType(data[7])
+	c.Backplane = NDPBackplaneType(data[8])
+	c.State = NDPState(data[9])
+	c.NumLinks = uint8(data[10])
+	p.AddLayer(c)
+	return nil
+}
+
+func (t NDPChassisType) String() (s string) {
+	switch t {
+	case NDPChassisother:
+		s = "other"
+	case NDPChassis3000:
+		s = "3000"
+	case NDPChassis3030:
+		s = "3030"
+	case NDPChassis2310:
+		s = "2310"
+	case NDPChassis2810:
+		s = "2810"
+	case NDPChassis2912:
+		s = "2912"
+	case NDPChassis2914:
+		s = "2914"
+	case NDPChassis271x:
+		s = "271x"
+	case NDPChassis2813:
+		s = "2813"
+	case NDPChassis2814:
+		s = "2814"
+	case NDPChassis2915:
+		s = "2915"
+	case NDPChassis5000:
+		s = "5000"
+	case NDPChassis2813SA:
+		s = "2813SA"
+	case NDPChassis2814SA:
+		s = "2814SA"
+	case NDPChassis810M:
+		s = "810M"
+	case NDPChassisEthercell:
+		s = "Ethercell"
+	case NDPChassis5005:
+		s = "5005"
+	case NDPChassisAlcatelEWC:
+		s = "Alcatel Ethernet workgroup conc."
+	case NDPChassis2715SA:
+		s = "2715SA"
+	case NDPChassis2486:
+		s = "2486"
+	case NDPChassis28000series:
+		s = "28000 series"
+	case NDPChassis23000series:
+		s = "23000 series"
+	case NDPChassis5DN00xseries:
+		s = "5DN00x series"
+	case NDPChassisBayStackEthernet:
+		s = "BayStack Ethernet"
+	case NDPChassis23100series:
+		s = "23100 series"
+	case NDPChassis100BaseTHub:
+		s = "100Base-T Hub"
+	case NDPChassis3000FastEthernet:
+		s = "3000 Fast Ethernet"
+	case NDPChassisOrionSwitch:
+		s = "Orion switch"
+	case NDPChassisDDS:
+		s = "DDS"
+	case NDPChassisCentillion6slot:
+		s = "Centillion (6 slot)"
+	case NDPChassisCentillion12slot:
+		s = "Centillion (12 slot)"
+	case NDPChassisCentillion1slot:
+		s = "Centillion (1 slot)"
+	case NDPChassisBayStack301:
+		s = "BayStack 301"
+	case NDPChassisBayStackTokenRingHub:
+		s = "BayStack TokenRing Hub"
+	case NDPChassisFVCMultimediaSwitch:
+		s = "FVC Multimedia Switch"
+	case NDPChassisSwitchNode:
+		s = "Switch Node"
+	case NDPChassisBayStack302Switch:
+		s = "BayStack 302 Switch"
+	case NDPChassisBayStack350Switch:
+		s = "BayStack 350 Switch"
+	case NDPChassisBayStack150EthernetHub:
+		s = "BayStack 150 Ethernet Hub"
+	case NDPChassisCentillion50NSwitch:
+		s = "Centillion 50N switch"
+	case NDPChassisCentillion50TSwitch:
+		s = "Centillion 50T switch"
+	case NDPChassisBayStack303304Switches:
+		s = "BayStack 303 and 304 Switches"
+	case NDPChassisBayStack200EthernetHub:
+		s = "BayStack 200 Ethernet Hub"
+	case NDPChassisBayStack25010100EthernetHub:
+		s = "BayStack 250 10/100 Ethernet Hub"
+	case NDPChassisBayStack450101001000Switches:
+		s = "BayStack 450 10/100/1000 Switches"
+	case NDPChassisBayStack41010100Switches:
+		s = "BayStack 410 10/100 Switches"
+	case NDPChassisPassport1200L3Switch:
+		s = "Passport 1200 L3 Switch"
+	case NDPChassisPassport1250L3Switch:
+		s = "Passport 1250 L3 Switch"
+	case NDPChassisPassport1100L3Switch:
+		s = "Passport 1100 L3 Switch"
+	case NDPChassisPassport1150L3Switch:
+		s = "Passport 1150 L3 Switch"
+	case NDPChassisPassport1050L3Switch:
+		s = "Passport 1050 L3 Switch"
+	case NDPChassisPassport1051L3Switch:
+		s = "Passport 1051 L3 Switch"
+	case NDPChassisPassport8610L3Switch:
+		s = "Passport 8610 L3 Switch"
+	case NDPChassisPassport8606L3Switch:
+		s = "Passport 8606 L3 Switch"
+	case NDPChassisPassport8010:
+		s = "Passport 8010"
+	case NDPChassisPassport8006:
+		s = "Passport 8006"
+	case NDPChassisBayStack670wirelessaccesspoint:
+		s = "BayStack 670 wireless access point"
+	case NDPChassisPassport740:
+		s = "Passport 740"
+	case NDPChassisPassport750:
+		s = "Passport 750"
+	case NDPChassisPassport790:
+		s = "Passport 790"
+	case NDPChassisBusinessPolicySwitch200010100Switches:
+		s = "Business Policy Switch 2000 10/100 Switches"
+	case NDPChassisPassport8110L2Switch:
+		s = "Passport 8110 L2 Switch"
+	case NDPChassisPassport8106L2Switch:
+		s = "Passport 8106 L2 Switch"
+	case NDPChassisBayStack3580GigSwitch:
+		s = "BayStack 3580 Gig Switch"
+	case NDPChassisBayStack10PowerSupplyUnit:
+		s = "BayStack 10 Power Supply Unit"
+	case NDPChassisBayStack42010100Switch:
+		s = "BayStack 420 10/100 Switch"
+	case NDPChassisOPTeraMetro1200EthernetServiceModule:
+		s = "OPTera Metro 1200 Ethernet Service Module"
+	case NDPChassisOPTera8010co:
+		s = "OPTera 8010co"
+	case NDPChassisOPTera8610coL3Switch:
+		s = "OPTera 8610co L3 switch"
+	case NDPChassisOPTera8110coL2Switch:
+		s = "OPTera 8110co L2 switch"
+	case NDPChassisOPTera8003:
+		s = "OPTera 8003"
+	case NDPChassisOPTera8603L3Switch:
+		s = "OPTera 8603 L3 switch"
+	case NDPChassisOPTera8103L2Switch:
+		s = "OPTera 8103 L2 switch"
+	case NDPChassisBayStack380101001000Switch:
+		s = "BayStack 380 10/100/1000 Switch"
+	case NDPChassisEthernetSwitch47048T:
+		s = "Ethernet Switch 470-48T"
+	case NDPChassisOPTeraMetro1450EthernetServiceModule:
+		s = "OPTera Metro 1450 Ethernet Service Module"
+	case NDPChassisOPTeraMetro1400EthernetServiceModule:
+		s = "OPTera Metro 1400 Ethernet Service Module"
+	case NDPChassisAlteonSwitchFamily:
+		s = "Alteon Switch Family"
+	case NDPChassisEthernetSwitch46024TPWR:
+		s = "Ethernet Switch 460-24T-PWR"
+	case NDPChassisOPTeraMetro8010OPML2Switch:
+		s = "OPTera Metro 8010 OPM L2 Switch"
+	case NDPChassisOPTeraMetro8010coOPML2Switch:
+		s = "OPTera Metro 8010co OPM L2 Switch"
+	case NDPChassisOPTeraMetro8006OPML2Switch:
+		s = "OPTera Metro 8006 OPM L2 Switch"
+	case NDPChassisOPTeraMetro8003OPML2Switch:
+		s = "OPTera Metro 8003 OPM L2 Switch"
+	case NDPChassisAlteon180e:
+		s = "Alteon 180e"
+	case NDPChassisAlteonAD3:
+		s = "Alteon AD3"
+	case NDPChassisAlteon184:
+		s = "Alteon 184"
+	case NDPChassisAlteonAD4:
+		s = "Alteon AD4"
+	case NDPChassisPassport1424L3Switch:
+		s = "Passport 1424 L3 switch"
+	case NDPChassisPassport1648L3Switch:
+		s = "Passport 1648 L3 switch"
+	case NDPChassisPassport1612L3Switch:
+		s = "Passport 1612 L3 switch"
+	case NDPChassisPassport1624L3Switch:
+		s = "Passport 1624 L3 switch"
+	case NDPChassisBayStack38024FFiber1000Switch:
+		s = "BayStack 380-24F Fiber 1000 Switch"
+	case NDPChassisEthernetRoutingSwitch551024T:
+		s = "Ethernet Routing Switch 5510-24T"
+	case NDPChassisEthernetRoutingSwitch551048T:
+		s = "Ethernet Routing Switch 5510-48T"
+	case NDPChassisEthernetSwitch47024T:
+		s = "Ethernet Switch 470-24T"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2220:
+		s = "Nortel Networks Wireless LAN Access Point 2220"
+	case NDPChassisPassportRBS2402L3Switch:
+		s = "Passport RBS 2402 L3 switch"
+	case NDPChassisAlteonApplicationSwitch2424:
+		s = "Alteon Application Switch 2424"
+	case NDPChassisAlteonApplicationSwitch2224:
+		s = "Alteon Application Switch 2224"
+	case NDPChassisAlteonApplicationSwitch2208:
+		s = "Alteon Application Switch 2208"
+	case NDPChassisAlteonApplicationSwitch2216:
+		s = "Alteon Application Switch 2216"
+	case NDPChassisAlteonApplicationSwitch3408:
+		s = "Alteon Application Switch 3408"
+	case NDPChassisAlteonApplicationSwitch3416:
+		s = "Alteon Application Switch 3416"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250:
+		s = "Nortel Networks Wireless LAN SecuritySwitch 2250"
+	case NDPChassisEthernetSwitch42548T:
+		s = "Ethernet Switch 425-48T"
+	case NDPChassisEthernetSwitch42524T:
+		s = "Ethernet Switch 425-24T"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2221:
+		s = "Nortel Networks Wireless LAN Access Point 2221"
+	case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch:
+		s = "Nortel Metro Ethernet Service Unit 24-T SPF switch"
+	case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch:
+		s = " Nortel Metro Ethernet Service Unit 24-T LX DC switch"
+	case NDPChassisPassport830010slotchassis:
+		s = "Passport 8300 10-slot chassis"
+	case NDPChassisPassport83006slotchassis:
+		s = "Passport 8300 6-slot chassis"
+	case NDPChassisEthernetRoutingSwitch552024TPWR:
+		s = "Ethernet Routing Switch 5520-24T-PWR"
+	case NDPChassisEthernetRoutingSwitch552048TPWR:
+		s = "Ethernet Routing Switch 5520-48T-PWR"
+	case NDPChassisNortelNetworksVPNGateway3050:
+		s = "Nortel Networks VPN Gateway 3050"
+	case NDPChassisAlteonSSL31010100:
+		s = "Alteon SSL 310 10/100"
+	case NDPChassisAlteonSSL31010100Fiber:
+		s = "Alteon SSL 310 10/100 Fiber"
+	case NDPChassisAlteonSSL31010100FIPS:
+		s = "Alteon SSL 310 10/100 FIPS"
+	case NDPChassisAlteonSSL410101001000:
+		s = "Alteon SSL 410 10/100/1000"
+	case NDPChassisAlteonSSL410101001000Fiber:
+		s = "Alteon SSL 410 10/100/1000 Fiber"
+	case NDPChassisAlteonApplicationSwitch2424SSL:
+		s = "Alteon Application Switch 2424-SSL"
+	case NDPChassisEthernetSwitch32524T:
+		s = "Ethernet Switch 325-24T"
+	case NDPChassisEthernetSwitch32524G:
+		s = "Ethernet Switch 325-24G"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2225:
+		s = "Nortel Networks Wireless LAN Access Point 2225"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270:
+		s = "Nortel Networks Wireless LAN SecuritySwitch 2270"
+	case NDPChassis24portEthernetSwitch47024TPWR:
+		s = "24-port Ethernet Switch 470-24T-PWR"
+	case NDPChassis48portEthernetSwitch47048TPWR:
+		s = "48-port Ethernet Switch 470-48T-PWR"
+	case NDPChassisEthernetRoutingSwitch553024TFD:
+		s = "Ethernet Routing Switch 5530-24TFD"
+	case NDPChassisEthernetSwitch351024T:
+		s = "Ethernet Switch 3510-24T"
+	case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch"
+	case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch"
+	case NDPChassisNortelSecureAccessSwitch:
+		s = "Nortel Secure Access Switch"
+	case NDPChassisNortelNetworksVPNGateway3070:
+		s = "Nortel Networks VPN Gateway 3070"
+	case NDPChassisOPTeraMetro3500:
+		s = "OPTera Metro 3500"
+	case NDPChassisSMBBES101024T:
+		s = "SMB BES 1010 24T"
+	case NDPChassisSMBBES101048T:
+		s = "SMB BES 1010 48T"
+	case NDPChassisSMBBES102024TPWR:
+		s = "SMB BES 1020 24T PWR"
+	case NDPChassisSMBBES102048TPWR:
+		s = "SMB BES 1020 48T PWR"
+	case NDPChassisSMBBES201024T:
+		s = "SMB BES 2010 24T"
+	case NDPChassisSMBBES201048T:
+		s = "SMB BES 2010 48T"
+	case NDPChassisSMBBES202024TPWR:
+		s = "SMB BES 2020 24T PWR"
+	case NDPChassisSMBBES202048TPWR:
+		s = "SMB BES 2020 48T PWR"
+	case NDPChassisSMBBES11024T:
+		s = "SMB BES 110 24T"
+	case NDPChassisSMBBES11048T:
+		s = "SMB BES 110 48T"
+	case NDPChassisSMBBES12024TPWR:
+		s = "SMB BES 120 24T PWR"
+	case NDPChassisSMBBES12048TPWR:
+		s = "SMB BES 120 48T PWR"
+	case NDPChassisSMBBES21024T:
+		s = "SMB BES 210 24T"
+	case NDPChassisSMBBES21048T:
+		s = "SMB BES 210 48T"
+	case NDPChassisSMBBES22024TPWR:
+		s = "SMB BES 220 24T PWR"
+	case NDPChassisSMBBES22048TPWR:
+		s = "SMB BES 220 48T PWR"
+	case NDPChassisOME6500:
+		s = "OME 6500"
+	case NDPChassisEthernetRoutingSwitch4548GT:
+		s = "Ethernet Routing Switch 4548GT"
+	case NDPChassisEthernetRoutingSwitch4548GTPWR:
+		s = "Ethernet Routing Switch 4548GT-PWR"
+	case NDPChassisEthernetRoutingSwitch4550T:
+		s = "Ethernet Routing Switch 4550T"
+	case NDPChassisEthernetRoutingSwitch4550TPWR:
+		s = "Ethernet Routing Switch 4550T-PWR"
+	case NDPChassisEthernetRoutingSwitch4526FX:
+		s = "Ethernet Routing Switch 4526FX"
+	case NDPChassisEthernetRoutingSwitch250026T:
+		s = "Ethernet Routing Switch 2500-26T"
+	case NDPChassisEthernetRoutingSwitch250026TPWR:
+		s = "Ethernet Routing Switch 2500-26T-PWR"
+	case NDPChassisEthernetRoutingSwitch250050T:
+		s = "Ethernet Routing Switch 2500-50T"
+	case NDPChassisEthernetRoutingSwitch250050TPWR:
+		s = "Ethernet Routing Switch 2500-50T-PWR"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPBackplaneType) String() (s string) {
+	switch t {
+	case NDPBackplaneOther:
+		s = "Other"
+	case NDPBackplaneEthernet:
+		s = "Ethernet"
+	case NDPBackplaneEthernetTokenring:
+		s = "Ethernet and Tokenring"
+	case NDPBackplaneEthernetFDDI:
+		s = "Ethernet and FDDI"
+	case NDPBackplaneEthernetTokenringFDDI:
+		s = "Ethernet, Tokenring and FDDI"
+	case NDPBackplaneEthernetTokenringRedundantPower:
+		s = "Ethernet and Tokenring with redundant power"
+	case NDPBackplaneEthernetTokenringFDDIRedundantPower:
+		s = "Ethernet, Tokenring, FDDI with redundant power"
+	case NDPBackplaneTokenRing:
+		s = "Token Ring"
+	case NDPBackplaneEthernetTokenringFastEthernet:
+		s = "Ethernet, Tokenring and Fast Ethernet"
+	case NDPBackplaneEthernetFastEthernet:
+		s = "Ethernet and Fast Ethernet"
+	case NDPBackplaneEthernetTokenringFastEthernetRedundantPower:
+		s = "Ethernet, Tokenring, Fast Ethernet with redundant power"
+	case NDPBackplaneEthernetFastEthernetGigabitEthernet:
+		s = "Ethernet, Fast Ethernet and Gigabit Ethernet"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPState) String() (s string) {
+	switch t {
+	case NDPStateTopology:
+		s = "Topology Change"
+	case NDPStateHeartbeat:
+		s = "Heartbeat"
+	case NDPStateNew:
+		s = "New"
+	default:
+		s = "Unknown"
+	}
+	return
+}
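A minimal decoding sketch for the layer above (the 11 payload bytes are hypothetical and simply follow the field layout read by decodeNortelDiscovery):

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	raw := []byte{
		192, 0, 2, 1, // IPAddress
		0x00, 0x01, 0x02, // SegmentID
		byte(layers.NDPChassisBayStack450101001000Switches), // Chassis
		byte(layers.NDPBackplaneEthernet),                    // Backplane
		byte(layers.NDPStateHeartbeat),                       // State
		2, // NumLinks
	}
	pkt := gopacket.NewPacket(raw, layers.LayerTypeNortelDiscovery, gopacket.Default)
	if ndp, ok := pkt.Layer(layers.LayerTypeNortelDiscovery).(*layers.NortelDiscovery); ok {
		fmt.Printf("chassis=%v backplane=%v state=%v links=%d\n",
			ndp.Chassis, ndp.Backplane, ndp.State, ndp.NumLinks)
	}
}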
diff --git a/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/google/gopacket/layers/ntp.go
new file mode 100644
index 0000000..33c15b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ntp.go
@@ -0,0 +1,416 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// Network Time Protocol (NTP) Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for NTP.
+//
+//******************************************************************************
+//
+// About The Network Time Protocol (NTP)
+// -------------------------------------
+// NTP is a protocol that enables computers on the internet to set their
+// clocks to the correct time (or to a time that is acceptably close to the
+// correct time). NTP runs on top of UDP.
+//
+// There have been a series of versions of the NTP protocol. The latest
+// version is V4 and is specified in RFC 5905:
+//     http://www.ietf.org/rfc/rfc5905.txt
+//
+//******************************************************************************
+//
+// References
+// ----------
+//
+// Wikipedia's NTP entry:
+//     https://en.wikipedia.org/wiki/Network_Time_Protocol
+//     This is the best place to get an overview of NTP.
+//
+// Network Time Protocol Home Website:
+//     http://www.ntp.org/
+//     This appears to be the official website of NTP.
+//
+// List of current NTP Protocol RFCs:
+//     http://www.ntp.org/rfc.html
+//
+// RFC 958: "Network Time Protocol (NTP)" (1985)
+//     https://tools.ietf.org/html/rfc958
+//     This is the original NTP specification.
+//
+// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992)
+//     https://tools.ietf.org/html/rfc1305
+//     The protocol was updated in 1992 yielding NTP V3.
+//
+// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010)
+//     https://www.ietf.org/rfc/rfc5905.txt
+//     The protocol was updated in 2010 yielding NTP V4.
+//     V4 is backwards compatible with all previous versions of NTP.
+//
+// RFC 5906: "Network Time Protocol Version 4: Autokey Specification"
+//     https://tools.ietf.org/html/rfc5906
+//     This document addresses the security of the NTP protocol
+//     and is probably not relevant to this package.
+//
+// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)"
+//     https://tools.ietf.org/html/rfc5907
+//     This document addresses the management of NTP servers and
+//     is probably not relevant to this package.
+//
+// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6"
+//     https://tools.ietf.org/html/rfc5908
+//     This document addresses the use of NTP in DHCPv6 and is
+//     probably not relevant to this package.
+//
+// "Let's make a NTP Client in C"
+//     https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html
+//     This web page contains useful information about the details of NTP,
+//     including an NTP record structure in C, and C code.
+//
+// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)"
+//     http://what-when-how.com/computer-network-time-synchronization/
+//        ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/
+//     This web page contains useful information on the details of NTP.
+//
+// "Technical information - NTP Data Packet"
+//     https://www.meinbergglobal.com/english/info/ntp-packet.htm
+//     This page has a helpful diagram of an NTP V4 packet.
+//
+//******************************************************************************
+//
+// Obsolete References
+// -------------------
+//
+// RFC 1119: "Network Time Protocol (Version 2) Specification and Implementation" (1989)
+//     https://tools.ietf.org/html/rfc1119
+//     Version 2 was drafted in 1989.
+//     It is unclear whether V2 was ever implemented or whether the
+//     ideas ended up in V3 (which was implemented in 1992).
+//
+// RFC 1361: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1361
+//     This document is obsoleted by RFC 1769 and is included only for completeness.
+//
+// RFC 1769: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1769
+//     This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness.
+//
+// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc2030
+//     This document is obsoleted by RFC 4330 and is included only for completeness.
+//
+// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc4330
+//     This document is obsoleted by RFC 5905 and is included only for completeness.
+//
+//******************************************************************************
+//
+// Endian And Bit Numbering Issues
+// -------------------------------
+//
+// Endian and bit numbering issues can be confusing. Here is some
+// clarification:
+//
+//    ENDIAN: Values are sent big endian.
+//    https://en.wikipedia.org/wiki/Endianness
+//
+//    BIT NUMBERING: Bits are numbered 0 upwards from the most significant
+//    bit to the least significant bit. This means that if there is a 32-bit
+//    value, the most significant bit is called bit 0 and the least
+//    significant bit is called bit 31.
+//
+// See RFC 791 Appendix B for more discussion.
+//
+//******************************************************************************
+//
+// NTP V3 and V4 Packet Format
+// ---------------------------
+// NTP packets are UDP packets whose payload contains an NTP record.
+//
+// The NTP RFC defines the format of the NTP record.
+//
+// There have been four versions of the protocol:
+//
+//    V1 in 1985
+//    V2 in 1989
+//    V3 in 1992
+//    V4 in 2010
+//
+// It is clear that V1 and V2 are obsolete, and there is no need to
+// cater for these formats.
+//
+// V3 and V4 essentially use the same format, with V4 adding some optional
+// fields on the end. So this package supports the V3 and V4 formats.
+//
+// The current version of NTP (NTP V4)'s RFC (V4 - RFC 5905) contains
+// the following diagram for the NTP record format:
+
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |LI | VN  |Mode |    Stratum    |     Poll      |   Precision   |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                         Root Delay                            |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                         Root Dispersion                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                          Reference ID                         |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                     Reference Timestamp (64)                  +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Origin Timestamp (64)                    +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Receive Timestamp (64)                   +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Transmit Timestamp (64)                  +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     .                                                               .
+//     .                    Extension Field 1 (variable)               .
+//     .                                                               .
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     .                                                               .
+//     .                    Extension Field 2 (variable)               .
+//     .                                                               .
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                          Key Identifier                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     |                            dgst (128)                         |
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     From http://www.ietf.org/rfc/rfc5905.txt
+//
+// The fields "Extension Field 1 (variable)" and later are optional fields,
+// and so we can set a minimum NTP record size of 48 bytes.
+//
+const ntpMinimumRecordSizeInBytes int = 48
+
+//******************************************************************************
+
+// NTP Type
+// --------
+// Type NTP implements the DecodingLayer interface. Each NTP object
+// represents in a structured form the NTP record present as the UDP
+// payload in an NTP UDP packet.
+//
+
+type NTPLeapIndicator uint8
+type NTPVersion uint8
+type NTPMode uint8
+type NTPStratum uint8
+type NTPLog2Seconds int8
+type NTPFixed16Seconds uint32
+type NTPReferenceID uint32
+type NTPTimestamp uint64
+
+type NTP struct {
+	BaseLayer // Stores the packet bytes and payload bytes.
+
+	LeapIndicator      NTPLeapIndicator  // [0,3]. Indicates whether leap second(s) is to be added.
+	Version            NTPVersion        // [0,7]. Version of the NTP protocol.
+	Mode               NTPMode           // [0,7]. Mode.
+	Stratum            NTPStratum        // [0,255]. Stratum of time server in the server tree.
+	Poll               NTPLog2Seconds    // [-128,127]. The maximum interval between successive messages, in log2 seconds.
+	Precision          NTPLog2Seconds    // [-128,127]. The precision of the system clock, in log2 seconds.
+	RootDelay          NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16.
+	RootDispersion     NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16.
+	ReferenceID        NTPReferenceID    // ID code of reference clock [0,2^32-1].
+	ReferenceTimestamp NTPTimestamp      // Most recent timestamp from the reference clock.
+	OriginTimestamp    NTPTimestamp      // Local time when request was sent from local host.
+	ReceiveTimestamp   NTPTimestamp      // Local time (on server) that request arrived at server host.
+	TransmitTimestamp  NTPTimestamp      // Local time (on server) that request departed server host.
+
+	// FIX: This package should analyse the extension fields and represent the extension fields too.
+	ExtensionBytes []byte // Just put extensions in a byte slice.
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the NTP object, which is LayerTypeNTP.
+func (d *NTP) LayerType() gopacket.LayerType {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// decodeNTP analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the NTP layer.
+func decodeNTP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &NTP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// On success, it loads the NTP object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a NTP record, then return an error.
+	if len(data) < ntpMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("NTP packet too short")
+	}
+
+	// RFC 5905 does not appear to define a maximum NTP record length.
+	// The protocol allows "extension fields" to be included in the record,
+	// and states about these fields:
+	//
+	//     "While the minimum field length containing required fields is
+	//      four words (16 octets), a maximum field length remains to be
+	//      established."
+	//
+	// For this reason, the packet length is not checked here for being too long.
+
+	// NTP type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level.
+	//    Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the NTP record.
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	f := data[0]
+	d.LeapIndicator = NTPLeapIndicator((f & 0xC0) >> 6)
+	d.Version = NTPVersion((f & 0x38) >> 3)
+	d.Mode = NTPMode(f & 0x07)
+	d.Stratum = NTPStratum(data[1])
+	d.Poll = NTPLog2Seconds(data[2])
+	d.Precision = NTPLog2Seconds(data[3])
+
+	// The remaining fields can just be copied in big endian order.
+	d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8]))
+	d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12]))
+	d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16]))
+	d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24]))
+	d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32]))
+	d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40]))
+	d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48]))
+
+	// This layer does not attempt to analyse the extension bytes.
+	// But if there are any, we'd like the user to know. So we just
+	// place them all in an ExtensionBytes field.
+	d.ExtensionBytes = data[48:]
+
+	// Return no error.
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	h := uint8(0)
+	h |= (uint8(d.LeapIndicator) << 6) & 0xC0
+	h |= (uint8(d.Version) << 3) & 0x38
+	h |= (uint8(d.Mode)) & 0x07
+	data[0] = byte(h)
+	data[1] = byte(d.Stratum)
+	data[2] = byte(d.Poll)
+	data[3] = byte(d.Precision)
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay))
+	binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion))
+	binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID))
+	binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp))
+	binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp))
+	binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp))
+	binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp))
+
+	ex, err := b.AppendBytes(len(d.ExtensionBytes))
+	if err != nil {
+		return err
+	}
+	copy(ex, d.ExtensionBytes)
+
+	return nil
+}
+
+//******************************************************************************
+
+// CanDecode returns a set of layers that NTP objects can decode.
+// As NTP objects can only decode the NTP layer, we can return just that layer.
+// A single gopacket.LayerType satisfies the LayerClass interface, so it can be returned directly.
+func (d *NTP) CanDecode() gopacket.LayerClass {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (NTP) layer. As NTP packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *NTP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+//******************************************************************************
+
+// NTP packets do not carry any data payload, so the empty byte slice is returned.
+// In Go, a nil slice is functionally identical to an empty slice, so we
+// return nil to avoid a heap allocation.
+func (d *NTP) Payload() []byte {
+	return nil
+}
+
+//******************************************************************************
+//*                            End Of NTP File                                 *
+//******************************************************************************
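A minimal sketch of decoding an NTP record directly with the DecodeFromBytes method above (the 48-byte buffer is hypothetical: only the first header byte is populated, encoding LI=0, VN=4, Mode=3):

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	raw := make([]byte, 48)
	raw[0] = 0x23 // 0b00_100_011: LeapIndicator=0, Version=4, Mode=3 (client)

	var ntp layers.NTP
	if err := ntp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("version=%d mode=%d stratum=%d extensions=%d bytes\n",
		ntp.Version, ntp.Mode, ntp.Stratum, len(ntp.ExtensionBytes))
}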
diff --git a/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/google/gopacket/layers/ospf.go
new file mode 100644
index 0000000..f3f2ca9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ospf.go
@@ -0,0 +1,680 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// OSPFType denotes the OSPF packet type.
+type OSPFType uint8
+
+// Potential values for OSPF.Type.
+const (
+	OSPFHello                   OSPFType = 1
+	OSPFDatabaseDescription     OSPFType = 2
+	OSPFLinkStateRequest        OSPFType = 3
+	OSPFLinkStateUpdate         OSPFType = 4
+	OSPFLinkStateAcknowledgment OSPFType = 5
+)
+
+// LSA Function Codes for LSAheader.LSType
+const (
+	RouterLSAtypeV2         = 0x1
+	RouterLSAtype           = 0x2001
+	NetworkLSAtypeV2        = 0x2
+	NetworkLSAtype          = 0x2002
+	SummaryLSANetworktypeV2 = 0x3
+	InterAreaPrefixLSAtype  = 0x2003
+	SummaryLSAASBRtypeV2    = 0x4
+	InterAreaRouterLSAtype  = 0x2004
+	ASExternalLSAtypeV2     = 0x5
+	ASExternalLSAtype       = 0x4005
+	NSSALSAtype             = 0x2007
+	LinkLSAtype             = 0x0008
+	IntraAreaPrefixLSAtype  = 0x2009
+)
+
+// String conversions for OSPFType
+func (i OSPFType) String() string {
+	switch i {
+	case OSPFHello:
+		return "Hello"
+	case OSPFDatabaseDescription:
+		return "Database Description"
+	case OSPFLinkStateRequest:
+		return "Link State Request"
+	case OSPFLinkStateUpdate:
+		return "Link State Update"
+	case OSPFLinkStateAcknowledgment:
+		return "Link State Acknowledgment"
+	default:
+		return ""
+	}
+}
+
+// Prefix extends IntraAreaPrefixLSA
+type Prefix struct {
+	PrefixLength  uint8
+	PrefixOptions uint8
+	Metric        uint16
+	AddressPrefix []byte
+}
+
+// IntraAreaPrefixLSA is the struct from RFC 5340  A.4.10.
+type IntraAreaPrefixLSA struct {
+	NumOfPrefixes  uint16
+	RefLSType      uint16
+	RefLinkStateID uint32
+	RefAdvRouter   uint32
+	Prefixes       []Prefix
+}
+
+// LinkLSA is the struct from RFC 5340  A.4.9.
+type LinkLSA struct {
+	RtrPriority      uint8
+	Options          uint32
+	LinkLocalAddress []byte
+	NumOfPrefixes    uint32
+	Prefixes         []Prefix
+}
+
+// ASExternalLSAV2 is the struct from RFC 2328  A.4.5.
+type ASExternalLSAV2 struct {
+	NetworkMask       uint32
+	ExternalBit       uint8
+	Metric            uint32
+	ForwardingAddress uint32
+	ExternalRouteTag  uint32
+}
+
+// ASExternalLSA is the struct from RFC 5340  A.4.7.
+type ASExternalLSA struct {
+	Flags             uint8
+	Metric            uint32
+	PrefixLength      uint8
+	PrefixOptions     uint8
+	RefLSType         uint16
+	AddressPrefix     []byte
+	ForwardingAddress []byte
+	ExternalRouteTag  uint32
+	RefLinkStateID    uint32
+}
+
+// InterAreaRouterLSA is the struct from RFC 5340  A.4.6.
+type InterAreaRouterLSA struct {
+	Options             uint32
+	Metric              uint32
+	DestinationRouterID uint32
+}
+
+// InterAreaPrefixLSA is the struct from RFC 5340  A.4.5.
+type InterAreaPrefixLSA struct {
+	Metric        uint32
+	PrefixLength  uint8
+	PrefixOptions uint8
+	AddressPrefix []byte
+}
+
+// NetworkLSA is the struct from RFC 5340  A.4.4.
+type NetworkLSA struct {
+	Options        uint32
+	AttachedRouter []uint32
+}
+
+// RouterV2 extends RouterLSAV2
+type RouterV2 struct {
+	Type     uint8
+	LinkID   uint32
+	LinkData uint32
+	Metric   uint16
+}
+
+// RouterLSAV2 is the struct from RFC 2328  A.4.2.
+type RouterLSAV2 struct {
+	Flags   uint8
+	Links   uint16
+	Routers []RouterV2
+}
+
+// Router extends RouterLSA
+type Router struct {
+	Type                uint8
+	Metric              uint16
+	InterfaceID         uint32
+	NeighborInterfaceID uint32
+	NeighborRouterID    uint32
+}
+
+// RouterLSA is the struct from RFC 5340  A.4.3.
+type RouterLSA struct {
+	Flags   uint8
+	Options uint32
+	Routers []Router
+}
+
+// LSAheader is the struct from RFC 5340  A.4.2 and RFC 2328 A.4.1.
+type LSAheader struct {
+	LSAge       uint16
+	LSType      uint16
+	LinkStateID uint32
+	AdvRouter   uint32
+	LSSeqNumber uint32
+	LSChecksum  uint16
+	Length      uint16
+	LSOptions   uint8
+}
+
+// LSA links LSAheader with the structs from RFC 5340  A.4.
+type LSA struct {
+	LSAheader
+	Content interface{}
+}
+
+// LSUpdate is the struct from RFC 5340  A.3.5.
+type LSUpdate struct {
+	NumOfLSAs uint32
+	LSAs      []LSA
+}
+
+// LSReq is the struct from RFC 5340  A.3.4.
+type LSReq struct {
+	LSType    uint16
+	LSID      uint32
+	AdvRouter uint32
+}
+
+// DbDescPkg is the struct from RFC 5340  A.3.3.
+type DbDescPkg struct {
+	Options      uint32
+	InterfaceMTU uint16
+	Flags        uint16
+	DDSeqNumber  uint32
+	LSAinfo      []LSAheader
+}
+
+// HelloPkg  is the struct from RFC 5340  A.3.2.
+type HelloPkg struct {
+	InterfaceID              uint32
+	RtrPriority              uint8
+	Options                  uint32
+	HelloInterval            uint16
+	RouterDeadInterval       uint32
+	DesignatedRouterID       uint32
+	BackupDesignatedRouterID uint32
+	NeighborID               []uint32
+}
+
+// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information
+type HelloPkgV2 struct {
+	HelloPkg
+	NetworkMask uint32
+}
+
+// OSPF is a basic OSPF packet header with common fields of Version 2 and Version 3.
+type OSPF struct {
+	Version      uint8
+	Type         OSPFType
+	PacketLength uint16
+	RouterID     uint32
+	AreaID       uint32
+	Checksum     uint16
+	Content      interface{}
+}
+
+// OSPFv2 extends the OSPF header with version 2 specific fields.
+type OSPFv2 struct {
+	BaseLayer
+	OSPF
+	AuType         uint16
+	Authentication uint64
+}
+
+// OSPFv3 extends the OSPF header with version 3 specific fields.
+type OSPFv3 struct {
+	BaseLayer
+	OSPF
+	Instance uint8
+	Reserved uint8
+}
+
+// getLSAsv2 parses the LSA information from the packet for OSPFv2
+func getLSAsv2(num uint32, data []byte) ([]LSA, error) {
+	var lsas []LSA
+	var i uint32 = 0
+	var offset uint32 = 0
+	for ; i < num; i++ {
+		lstype := uint16(data[offset+3])
+		lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+		content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+		if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type.")
+		}
+		lsa := LSA{
+			LSAheader: LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+				LSOptions:   data[offset+2],
+				LSType:      lstype,
+				LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+				Length:      lsalength,
+			},
+			Content: content,
+		}
+		lsas = append(lsas, lsa)
+		offset += uint32(lsalength)
+	}
+	return lsas, nil
+}
+
+// extractLSAInformation extracts all the LSA information
+func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) {
+	if lsalength < 20 {
+		return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20)
+	}
+	if len(data) < int(lsalength) {
+		return nil, fmt.Errorf("Link State header length %v too short, %v required", len(data), lsalength)
+	}
+	var content interface{}
+	switch lstype {
+	case RouterLSAtypeV2:
+		var routers []RouterV2
+		links := binary.BigEndian.Uint16(data[22:24])
+		content = RouterLSAV2{
+			Flags:   data[20],
+			Links:   links,
+			Routers: routers,
+		}
+	case ASExternalLSAtypeV2:
+		content = ASExternalLSAV2{
+			NetworkMask:       binary.BigEndian.Uint32(data[20:24]),
+			ExternalBit:       data[24] & 0x80,
+			Metric:            binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+			ForwardingAddress: binary.BigEndian.Uint32(data[28:32]),
+			ExternalRouteTag:  binary.BigEndian.Uint32(data[32:36]),
+		}
+	case RouterLSAtype:
+		var routers []Router
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 16 {
+			router := Router{
+				Type:                uint8(data[j]),
+				Metric:              binary.BigEndian.Uint16(data[j+2 : j+4]),
+				InterfaceID:         binary.BigEndian.Uint32(data[j+4 : j+8]),
+				NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]),
+				NeighborRouterID:    binary.BigEndian.Uint32(data[j+12 : j+16]),
+			}
+			routers = append(routers, router)
+		}
+		content = RouterLSA{
+			Flags:   uint8(data[20]),
+			Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			Routers: routers,
+		}
+	case NetworkLSAtype:
+		var routers []uint32
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 4 {
+			routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+		}
+		content = NetworkLSA{
+			Options:        binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			AttachedRouter: routers,
+		}
+	case InterAreaPrefixLSAtype:
+		content = InterAreaPrefixLSA{
+			Metric:        binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			PrefixLength:  uint8(data[24]),
+			PrefixOptions: uint8(data[25]),
+			AddressPrefix: data[28:uint32(lsalength)],
+		}
+	case InterAreaRouterLSAtype:
+		content = InterAreaRouterLSA{
+			Options:             binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			Metric:              binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+			DestinationRouterID: binary.BigEndian.Uint32(data[28:32]),
+		}
+	case ASExternalLSAtype:
+		fallthrough
+	case NSSALSAtype:
+
+		flags := uint8(data[20])
+		prefixLen := uint8(data[24]) / 8
+		var forwardingAddress []byte
+		if (flags & 0x02) == 0x02 {
+			forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16]
+		}
+		content = ASExternalLSA{
+			Flags:             flags,
+			Metric:            binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			PrefixLength:      prefixLen,
+			PrefixOptions:     uint8(data[25]),
+			RefLSType:         binary.BigEndian.Uint16(data[26:28]),
+			AddressPrefix:     data[28 : 28+uint32(prefixLen)],
+			ForwardingAddress: forwardingAddress,
+		}
+	case LinkLSAtype:
+		var prefixes []Prefix
+		var prefixOffset uint32 = 44
+		var j uint32
+		numOfPrefixes := binary.BigEndian.Uint32(data[40:44])
+		for j = 0; j < numOfPrefixes; j++ {
+			prefixLen := uint8(data[prefixOffset])
+			prefix := Prefix{
+				PrefixLength:  prefixLen,
+				PrefixOptions: uint8(data[prefixOffset+1]),
+				AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+			}
+			prefixes = append(prefixes, prefix)
+			prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8
+		}
+		content = LinkLSA{
+			RtrPriority:      uint8(data[20]),
+			Options:          binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			LinkLocalAddress: data[24:40],
+			NumOfPrefixes:    numOfPrefixes,
+			Prefixes:         prefixes,
+		}
+	case IntraAreaPrefixLSAtype:
+		var prefixes []Prefix
+		var prefixOffset uint32 = 32
+		var j uint16
+		numOfPrefixes := binary.BigEndian.Uint16(data[20:22])
+		for j = 0; j < numOfPrefixes; j++ {
+			prefixLen := uint8(data[prefixOffset])
+			prefix := Prefix{
+				PrefixLength:  prefixLen,
+				PrefixOptions: uint8(data[prefixOffset+1]),
+				Metric:        binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]),
+				AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+			}
+			prefixes = append(prefixes, prefix)
+			prefixOffset = prefixOffset + 4 + uint32(prefixLen)
+		}
+		content = IntraAreaPrefixLSA{
+			NumOfPrefixes:  numOfPrefixes,
+			RefLSType:      binary.BigEndian.Uint16(data[22:24]),
+			RefLinkStateID: binary.BigEndian.Uint32(data[24:28]),
+			RefAdvRouter:   binary.BigEndian.Uint32(data[28:32]),
+			Prefixes:       prefixes,
+		}
+	default:
+		return nil, fmt.Errorf("Unknown Link State type %v", lstype)
+	}
+	return content, nil
+}
+
+// getLSAs parses the LSA information from the packet for OSPFv3
+func getLSAs(num uint32, data []byte) ([]LSA, error) {
+	var lsas []LSA
+	var i uint32 = 0
+	var offset uint32 = 0
+	for ; i < num; i++ {
+		var content interface{}
+		lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+
+		content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+		if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State information: %v", err)
+		}
+		lsa := LSA{
+			LSAheader: LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+				LSType:      lstype,
+				LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+				Length:      lsalength,
+			},
+			Content: content,
+		}
+		lsas = append(lsas, lsa)
+		offset += uint32(lsalength)
+	}
+	return lsas, nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 24 {
+		return fmt.Errorf("Packet too small for OSPF Version 2")
+	}
+
+	ospf.Version = uint8(data[0])
+	ospf.Type = OSPFType(data[1])
+	ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+	ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+	ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+	ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+	ospf.AuType = binary.BigEndian.Uint16(data[14:16])
+	ospf.Authentication = binary.BigEndian.Uint64(data[16:24])
+
+	switch ospf.Type {
+	case OSPFHello:
+		var neighbors []uint32
+		for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 {
+			neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+		}
+		ospf.Content = HelloPkgV2{
+			NetworkMask: binary.BigEndian.Uint32(data[24:28]),
+			HelloPkg: HelloPkg{
+				HelloInterval:            binary.BigEndian.Uint16(data[28:30]),
+				Options:                  uint32(data[30]),
+				RtrPriority:              uint8(data[31]),
+				RouterDeadInterval:       binary.BigEndian.Uint32(data[32:36]),
+				DesignatedRouterID:       binary.BigEndian.Uint32(data[36:40]),
+				BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]),
+				NeighborID:               neighbors,
+			},
+		}
+	case OSPFDatabaseDescription:
+		var lsas []LSAheader
+		for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = DbDescPkg{
+			InterfaceMTU: binary.BigEndian.Uint16(data[24:26]),
+			Options:      uint32(data[26]),
+			Flags:        uint16(data[27]),
+			DDSeqNumber:  binary.BigEndian.Uint32(data[28:32]),
+			LSAinfo:      lsas,
+		}
+	case OSPFLinkStateRequest:
+		var lsrs []LSReq
+		for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 {
+			lsr := LSReq{
+				LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+			}
+			lsrs = append(lsrs, lsr)
+		}
+		ospf.Content = lsrs
+	case OSPFLinkStateUpdate:
+		num := binary.BigEndian.Uint32(data[24:28])
+
+		lsas, err := getLSAsv2(num, data[28:])
+		if err != nil {
+			return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+		}
+		ospf.Content = LSUpdate{
+			NumOfLSAs: num,
+			LSAs:      lsas,
+		}
+	case OSPFLinkStateAcknowledgment:
+		var lsas []LSAheader
+		for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSOptions:   data[i+2],
+				LSType:      uint16(data[i+3]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	}
+	return nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	if len(data) < 16 {
+		return fmt.Errorf("Packet too small for OSPF Version 3")
+	}
+
+	ospf.Version = uint8(data[0])
+	ospf.Type = OSPFType(data[1])
+	ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+	ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+	ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+	ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+	ospf.Instance = uint8(data[14])
+	ospf.Reserved = uint8(data[15])
+
+	switch ospf.Type {
+	case OSPFHello:
+		var neighbors []uint32
+		for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 {
+			neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+		}
+		ospf.Content = HelloPkg{
+			InterfaceID:              binary.BigEndian.Uint32(data[16:20]),
+			RtrPriority:              uint8(data[20]),
+			Options:                  binary.BigEndian.Uint32(data[21:25]) >> 8,
+			HelloInterval:            binary.BigEndian.Uint16(data[24:26]),
+			RouterDeadInterval:       uint32(binary.BigEndian.Uint16(data[26:28])),
+			DesignatedRouterID:       binary.BigEndian.Uint32(data[28:32]),
+			BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]),
+			NeighborID:               neighbors,
+		}
+	case OSPFDatabaseDescription:
+		var lsas []LSAheader
+		for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = DbDescPkg{
+			Options:      binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF,
+			InterfaceMTU: binary.BigEndian.Uint16(data[20:22]),
+			Flags:        binary.BigEndian.Uint16(data[22:24]),
+			DDSeqNumber:  binary.BigEndian.Uint32(data[24:28]),
+			LSAinfo:      lsas,
+		}
+	case OSPFLinkStateRequest:
+		var lsrs []LSReq
+		for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 {
+			lsr := LSReq{
+				LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+			}
+			lsrs = append(lsrs, lsr)
+		}
+		ospf.Content = lsrs
+	case OSPFLinkStateUpdate:
+		num := binary.BigEndian.Uint32(data[16:20])
+		lsas, err := getLSAs(num, data[20:])
+		if err != nil {
+			return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+		}
+		ospf.Content = LSUpdate{
+			NumOfLSAs: num,
+			LSAs:      lsas,
+		}
+
+	case OSPFLinkStateAcknowledgment:
+		var lsas []LSAheader
+		for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	default:
+	}
+
+	return nil
+}
+
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv2) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+func (ospf *OSPFv3) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+
+func decodeOSPF(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 14 {
+		return fmt.Errorf("Packet too small for OSPF")
+	}
+
+	switch uint8(data[0]) {
+	case 2:
+		ospf := &OSPFv2{}
+		return decodingLayerDecoder(ospf, data, p)
+	case 3:
+		ospf := &OSPFv3{}
+		return decodingLayerDecoder(ospf, data, p)
+	default:
+	}
+
+	return fmt.Errorf("Unable to determine OSPF type.")
+}
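
Aside, not part of the vendored file: a minimal sketch of driving the OSPF decoder above. The packet bytes are hand-built and every field value is synthetic; only the gopacket/layers APIs shown are real.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 24-byte OSPFv2 header followed by a 20-byte Hello body, no neighbors.
	data := make([]byte, 44)
	data[0] = 2                                         // version 2
	data[1] = uint8(layers.OSPFHello)                   // packet type: Hello
	binary.BigEndian.PutUint16(data[2:4], 44)           // packet length
	binary.BigEndian.PutUint32(data[4:8], 0x01010101)   // router ID 1.1.1.1
	binary.BigEndian.PutUint32(data[24:28], 0xffffff00) // network mask /24
	binary.BigEndian.PutUint16(data[28:30], 10)         // hello interval (s)
	binary.BigEndian.PutUint32(data[32:36], 40)         // router dead interval (s)

	pkt := gopacket.NewPacket(data, layers.LayerTypeOSPF, gopacket.Default)
	if ospf, ok := pkt.Layer(layers.LayerTypeOSPF).(*layers.OSPFv2); ok {
		hello, _ := ospf.Content.(layers.HelloPkgV2)
		fmt.Printf("OSPFv2 %v from router %#x, hello interval %d s\n",
			ospf.Type, ospf.RouterID, hello.HelloInterval)
	}
}
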
diff --git a/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/google/gopacket/layers/pflog.go
new file mode 100644
index 0000000..853882f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pflog.go
@@ -0,0 +1,76 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+type PFDirection uint8
+
+const (
+	PFDirectionInOut PFDirection = 0
+	PFDirectionIn    PFDirection = 1
+	PFDirectionOut   PFDirection = 2
+)
+
+// PFLog provides the layer for 'pf' packet-filter logging, as described at
+// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4
+type PFLog struct {
+	BaseLayer
+	Length              uint8
+	Family              ProtocolFamily
+	Action, Reason      uint8
+	IFName, Ruleset     []byte
+	RuleNum, SubruleNum uint32
+	UID                 uint32
+	PID                 int32
+	RuleUID             uint32
+	RulePID             int32
+	Direction           PFDirection
+	// The remainder is padding
+}
+
+func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	pf.Length = data[0]
+	pf.Family = ProtocolFamily(data[1])
+	pf.Action = data[2]
+	pf.Reason = data[3]
+	pf.IFName = data[4:20]
+	pf.Ruleset = data[20:36]
+	pf.RuleNum = binary.BigEndian.Uint32(data[36:40])
+	pf.SubruleNum = binary.BigEndian.Uint32(data[40:44])
+	pf.UID = binary.BigEndian.Uint32(data[44:48])
+	pf.PID = int32(binary.BigEndian.Uint32(data[48:52]))
+	pf.RuleUID = binary.BigEndian.Uint32(data[52:56])
+	pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60]))
+	pf.Direction = PFDirection(data[60])
+	if pf.Length%4 != 1 {
+		return errors.New("PFLog header length should be 3 less than multiple of 4")
+	}
+	actualLength := int(pf.Length) + 3
+	pf.Contents = data[:actualLength]
+	pf.Payload = data[actualLength:]
+	return nil
+}
+
+// LayerType returns layers.LayerTypePFLog
+func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog }
+
+func (pf *PFLog) CanDecode() gopacket.LayerClass { return LayerTypePFLog }
+
+func (pf *PFLog) NextLayerType() gopacket.LayerType {
+	return pf.Family.LayerType()
+}
+
+func decodePFLog(data []byte, p gopacket.PacketBuilder) error {
+	pf := &PFLog{}
+	return decodingLayerDecoder(pf, data, p)
+}
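
Aside, not part of the vendored file: a sketch that feeds the PFLog decoder above a hand-built, 64-byte header in isolation. All values are synthetic.

package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 61 header bytes padded to 64 (61 % 4 == 1, so the padded length is 61+3).
	data := make([]byte, 64)
	data[0] = 61                              // header length
	data[1] = byte(layers.ProtocolFamilyIPv4) // address family of the logged packet
	copy(data[4:20], "em0")                   // interface name, zero padded
	data[60] = byte(layers.PFDirectionIn)     // direction

	var pf layers.PFLog
	if err := pf.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
		log.Fatal(err)
	}
	fmt.Println(pf.Family, pf.Direction == layers.PFDirectionIn, pf.NextLayerType())
}
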
diff --git a/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/google/gopacket/layers/ports.go
new file mode 100644
index 0000000..705fd1d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ports.go
@@ -0,0 +1,154 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/google/gopacket"
+)
+
+// TCPPort is a port in a TCP layer.
+type TCPPort uint16
+
+// UDPPort is a port in a UDP layer.
+type UDPPort uint16
+
+// RUDPPort is a port in a RUDP layer.
+type RUDPPort uint8
+
+// SCTPPort is a port in a SCTP layer.
+type SCTPPort uint16
+
+// UDPLitePort is a port in a UDPLite layer.
+type UDPLitePort uint16
+
+// RUDPPortNames contains the string names for all RUDP ports.
+var RUDPPortNames = map[RUDPPort]string{}
+
+// UDPLitePortNames contains the string names for all UDPLite ports.
+var UDPLitePortNames = map[UDPLitePort]string{}
+
+// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// TCPPortNames.
+func (a TCPPort) String() string {
+	if name, ok := TCPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a TCPPort) LayerType() gopacket.LayerType {
+	lt := tcpPortLayerType[uint16(a)]
+	if lt != 0 {
+		return lt
+	}
+	return gopacket.LayerTypePayload
+}
+
+var tcpPortLayerType = [65536]gopacket.LayerType{
+	53:   LayerTypeDNS,
+	443:  LayerTypeTLS,       // https
+	502:  LayerTypeModbusTCP, // modbustcp
+	636:  LayerTypeTLS,       // ldaps
+	989:  LayerTypeTLS,       // ftps-data
+	990:  LayerTypeTLS,       // ftps
+	992:  LayerTypeTLS,       // telnets
+	993:  LayerTypeTLS,       // imaps
+	994:  LayerTypeTLS,       // ircs
+	995:  LayerTypeTLS,       // pop3s
+	5061: LayerTypeTLS,       // sips
+}
+
+// RegisterTCPPortLayerType creates a new mapping between a TCPPort
+// and an underlying LayerType.
+func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) {
+	tcpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// UDPPortNames.
+func (a UDPPort) String() string {
+	if name, ok := UDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a UDPPort) LayerType() gopacket.LayerType {
+	lt := udpPortLayerType[uint16(a)]
+	if lt != 0 {
+		return lt
+	}
+	return gopacket.LayerTypePayload
+}
+
+var udpPortLayerType = [65536]gopacket.LayerType{
+	53:   LayerTypeDNS,
+	123:  LayerTypeNTP,
+	4789: LayerTypeVXLAN,
+	67:   LayerTypeDHCPv4,
+	68:   LayerTypeDHCPv4,
+	546:  LayerTypeDHCPv6,
+	547:  LayerTypeDHCPv6,
+	5060: LayerTypeSIP,
+	6343: LayerTypeSFlow,
+	6081: LayerTypeGeneve,
+	3784: LayerTypeBFD,
+	2152: LayerTypeGTPv1U,
+}
+
+// RegisterUDPPortLayerType creates a new mapping between a UDPPort
+// and an underlying LayerType.
+func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) {
+	udpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// RUDPPortNames.
+func (a RUDPPort) String() string {
+	if name, ok := RUDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// SCTPPortNames.
+func (a SCTPPort) String() string {
+	if name, ok := SCTPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// UDPLitePortNames.
+func (a UDPLitePort) String() string {
+	if name, ok := UDPLitePortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
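
Aside, not part of the vendored file: how an application would extend the port tables above. The 8443-to-TLS mapping is an arbitrary example, not something the library ships with.

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	// Teach the TCP table that traffic on 8443 should be decoded as TLS.
	layers.RegisterTCPPortLayerType(layers.TCPPort(8443), layers.LayerTypeTLS)

	fmt.Println(layers.TCPPort(443))              // "443(https)" via the IANA name table
	fmt.Println(layers.TCPPort(8443).LayerType()) // TLS, thanks to the registration above
	fmt.Println(layers.UDPPort(53).LayerType())   // DNS, from the built-in udpPortLayerType table
}
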
diff --git a/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/google/gopacket/layers/ppp.go
new file mode 100644
index 0000000..e534d69
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ppp.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// PPP is the layer for PPP encapsulation headers.
+type PPP struct {
+	BaseLayer
+	PPPType       PPPType
+	HasPPTPHeader bool
+}
+
+// PPPEndpoint is a singleton endpoint for PPP.  Since there is no actual
+// addressing for the two ends of a PPP connection, we use a singleton value
+// named 'point' for each endpoint.
+var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)
+
+// PPPFlow is a singleton flow for PPP.  Since there is no actual addressing for
+// the two ends of a PPP connection, we use a singleton value to represent the
+// flow for all PPP connections.
+var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)
+
+// LayerType returns LayerTypePPP
+func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }
+
+// LinkFlow returns PPPFlow.
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow }
+
+func decodePPP(data []byte, p gopacket.PacketBuilder) error {
+	ppp := &PPP{}
+	offset := 0
+	if data[0] == 0xff && data[1] == 0x03 {
+		offset = 2
+		ppp.HasPPTPHeader = true
+	}
+	if data[offset]&0x1 == 0 {
+		if data[offset+1]&0x1 == 0 {
+			return errors.New("PPP has invalid type")
+		}
+		ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2]))
+		ppp.Contents = data[offset : offset+2]
+		ppp.Payload = data[offset+2:]
+	} else {
+		ppp.PPPType = PPPType(data[offset])
+		ppp.Contents = data[offset : offset+1]
+		ppp.Payload = data[offset+1:]
+	}
+	p.AddLayer(ppp)
+	p.SetLinkLayer(ppp)
+	return p.NextDecoder(ppp.PPPType)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if p.PPPType&0x100 == 0 {
+		bytes, err := b.PrependBytes(2)
+		if err != nil {
+			return err
+		}
+		binary.BigEndian.PutUint16(bytes, uint16(p.PPPType))
+	} else {
+		bytes, err := b.PrependBytes(1)
+		if err != nil {
+			return err
+		}
+		bytes[0] = uint8(p.PPPType)
+	}
+	if p.HasPPTPHeader {
+		bytes, err := b.PrependBytes(2)
+		if err != nil {
+			return err
+		}
+		bytes[0] = 0xff
+		bytes[1] = 0x03
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/google/gopacket/layers/pppoe.go
new file mode 100644
index 0000000..14cd63a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pppoe.go
@@ -0,0 +1,60 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// PPPoE is the layer for PPPoE encapsulation headers.
+type PPPoE struct {
+	BaseLayer
+	Version   uint8
+	Type      uint8
+	Code      PPPoECode
+	SessionId uint16
+	Length    uint16
+}
+
+// LayerType returns gopacket.LayerTypePPPoE.
+func (p *PPPoE) LayerType() gopacket.LayerType {
+	return LayerTypePPPoE
+}
+
+// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516).
+func decodePPPoE(data []byte, p gopacket.PacketBuilder) error {
+	pppoe := &PPPoE{
+		Version:   data[0] >> 4,
+		Type:      data[0] & 0x0F,
+		Code:      PPPoECode(data[1]),
+		SessionId: binary.BigEndian.Uint16(data[2:4]),
+		Length:    binary.BigEndian.Uint16(data[4:6]),
+	}
+	pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]}
+	p.AddLayer(pppoe)
+	return p.NextDecoder(pppoe.Code)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	payload := b.Bytes()
+	bytes, err := b.PrependBytes(6)
+	if err != nil {
+		return err
+	}
+	bytes[0] = (p.Version << 4) | p.Type
+	bytes[1] = byte(p.Code)
+	binary.BigEndian.PutUint16(bytes[2:], p.SessionId)
+	if opts.FixLengths {
+		p.Length = uint16(len(payload))
+	}
+	binary.BigEndian.PutUint16(bytes[4:], p.Length)
+	return nil
+}
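
Aside, not part of the vendored files: a sketch that stacks the PPPoE and PPP serializers above. SerializeLayers works back to front, so the PPP layer prepends its protocol field first and the PPPoE layer then sees the finished payload, letting FixLengths fill in Length. The session ID and payload bytes are made up.

package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	pppoe := &layers.PPPoE{Version: 1, Type: 1, Code: layers.PPPoECodeSession, SessionId: 0x1234}
	ppp := &layers.PPP{PPPType: layers.PPPTypeIPv4}
	payload := gopacket.Payload([]byte{0x45}) // stand-in for a real IPv4 packet

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opts, pppoe, ppp, payload); err != nil {
		log.Fatal(err)
	}
	// With FixLengths, the PPPoE Length field comes out as 3:
	// the 2-byte PPP protocol field plus the 1-byte payload.
	fmt.Printf("% x\n", buf.Bytes())
}
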
diff --git a/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/google/gopacket/layers/prism.go
new file mode 100644
index 0000000..e1711e7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/prism.go
@@ -0,0 +1,146 @@
+// Copyright 2015 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+func decodePrismValue(data []byte, pv *PrismValue) {
+	pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4]))
+	pv.Status = binary.LittleEndian.Uint16(data[4:6])
+	pv.Length = binary.LittleEndian.Uint16(data[6:8])
+	pv.Data = data[8 : 8+pv.Length]
+}
+
+type PrismDID uint32
+
+const (
+	PrismDIDType1HostTime                  PrismDID = 0x10044
+	PrismDIDType2HostTime                  PrismDID = 0x01041
+	PrismDIDType1MACTime                   PrismDID = 0x20044
+	PrismDIDType2MACTime                   PrismDID = 0x02041
+	PrismDIDType1Channel                   PrismDID = 0x30044
+	PrismDIDType2Channel                   PrismDID = 0x03041
+	PrismDIDType1RSSI                      PrismDID = 0x40044
+	PrismDIDType2RSSI                      PrismDID = 0x04041
+	PrismDIDType1SignalQuality             PrismDID = 0x50044
+	PrismDIDType2SignalQuality             PrismDID = 0x05041
+	PrismDIDType1Signal                    PrismDID = 0x60044
+	PrismDIDType2Signal                    PrismDID = 0x06041
+	PrismDIDType1Noise                     PrismDID = 0x70044
+	PrismDIDType2Noise                     PrismDID = 0x07041
+	PrismDIDType1Rate                      PrismDID = 0x80044
+	PrismDIDType2Rate                      PrismDID = 0x08041
+	PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044
+	PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041
+	PrismDIDType1FrameLength               PrismDID = 0xA0044
+	PrismDIDType2FrameLength               PrismDID = 0x0A041
+)
+
+const (
+	PrismType1MessageCode uint16 = 0x00000044
+	PrismType2MessageCode uint16 = 0x00000041
+)
+
+func (p PrismDID) String() string {
+	dids := map[PrismDID]string{
+		PrismDIDType1HostTime:                  "Host Time",
+		PrismDIDType2HostTime:                  "Host Time",
+		PrismDIDType1MACTime:                   "MAC Time",
+		PrismDIDType2MACTime:                   "MAC Time",
+		PrismDIDType1Channel:                   "Channel",
+		PrismDIDType2Channel:                   "Channel",
+		PrismDIDType1RSSI:                      "RSSI",
+		PrismDIDType2RSSI:                      "RSSI",
+		PrismDIDType1SignalQuality:             "Signal Quality",
+		PrismDIDType2SignalQuality:             "Signal Quality",
+		PrismDIDType1Signal:                    "Signal",
+		PrismDIDType2Signal:                    "Signal",
+		PrismDIDType1Noise:                     "Noise",
+		PrismDIDType2Noise:                     "Noise",
+		PrismDIDType1Rate:                      "Rate",
+		PrismDIDType2Rate:                      "Rate",
+		PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator",
+		PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator",
+		PrismDIDType1FrameLength:               "Frame Length",
+		PrismDIDType2FrameLength:               "Frame Length",
+	}
+
+	if str, ok := dids[p]; ok {
+		return str
+	}
+
+	return "Unknown DID"
+}
+
+type PrismValue struct {
+	DID    PrismDID
+	Status uint16
+	Length uint16
+	Data   []byte
+}
+
+func (pv *PrismValue) IsSupplied() bool {
+	return pv.Status == 1
+}
+
+var ErrPrismExpectedMoreData = errors.New("Expected more data.")
+var ErrPrismInvalidCode = errors.New("Invalid header code.")
+
+func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error {
+	d := &PrismHeader{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type PrismHeader struct {
+	BaseLayer
+	Code       uint16
+	Length     uint16
+	DeviceName string
+	Values     []PrismValue
+}
+
+func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader }
+
+func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Code = binary.LittleEndian.Uint16(data[0:4])
+	m.Length = binary.LittleEndian.Uint16(data[4:8])
+	m.DeviceName = string(data[8:24])
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:len(data)]}
+
+	switch m.Code {
+	case PrismType1MessageCode:
+		fallthrough
+	case PrismType2MessageCode:
+		// valid message code
+	default:
+		return ErrPrismInvalidCode
+	}
+
+	offset := uint16(24)
+
+	m.Values = make([]PrismValue, (m.Length-offset)/12)
+	for i := 0; i < len(m.Values); i++ {
+		decodePrismValue(data[offset:offset+12], &m.Values[i])
+		offset += 12
+	}
+
+	if offset != m.Length {
+		return ErrPrismExpectedMoreData
+	}
+
+	return nil
+}
+
+func (m *PrismHeader) CanDecode() gopacket.LayerClass    { return LayerTypePrismHeader }
+func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
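
Aside, not part of the vendored file: the Prism decoder above expects a 24-byte fixed header followed by 12-byte value records. A sketch that hand-builds one record (all numbers synthetic) and runs it through the decoder in isolation.

package main

import (
	"encoding/binary"
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	buf := make([]byte, 36) // 24-byte fixed header + one 12-byte value record
	binary.LittleEndian.PutUint16(buf[0:2], layers.PrismType1MessageCode)
	binary.LittleEndian.PutUint16(buf[4:6], 36) // total header length
	copy(buf[8:24], "wlan0")                    // device name, zero padded

	binary.LittleEndian.PutUint32(buf[24:28], uint32(layers.PrismDIDType1Channel))
	binary.LittleEndian.PutUint16(buf[28:30], 1) // status: supplied
	binary.LittleEndian.PutUint16(buf[30:32], 4) // value length
	binary.LittleEndian.PutUint32(buf[32:36], 6) // channel 6

	var h layers.PrismHeader
	if err := h.DecodeFromBytes(buf, gopacket.NilDecodeFeedback); err != nil {
		log.Fatal(err)
	}
	v := h.Values[0]
	fmt.Println(v.DID, v.IsSupplied(), binary.LittleEndian.Uint32(v.Data))
}
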
diff --git a/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/google/gopacket/layers/radiotap.go
new file mode 100644
index 0000000..17c6133
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/radiotap.go
@@ -0,0 +1,1069 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// align calculates the number of bytes needed to align with the width
+// on the offset, returning the number of bytes we need to skip to
+// align to the offset (width).
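+// For example, align(9, 4) == 3: three padding bytes are needed to reach the
+// next 4-byte boundary at offset 12, while align(8, 4) == 0.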
+func align(offset uint16, width uint16) uint16 {
+	return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset)
+}
+
+type RadioTapPresent uint32
+
+const (
+	RadioTapPresentTSFT RadioTapPresent = 1 << iota
+	RadioTapPresentFlags
+	RadioTapPresentRate
+	RadioTapPresentChannel
+	RadioTapPresentFHSS
+	RadioTapPresentDBMAntennaSignal
+	RadioTapPresentDBMAntennaNoise
+	RadioTapPresentLockQuality
+	RadioTapPresentTxAttenuation
+	RadioTapPresentDBTxAttenuation
+	RadioTapPresentDBMTxPower
+	RadioTapPresentAntenna
+	RadioTapPresentDBAntennaSignal
+	RadioTapPresentDBAntennaNoise
+	RadioTapPresentRxFlags
+	RadioTapPresentTxFlags
+	RadioTapPresentRtsRetries
+	RadioTapPresentDataRetries
+	_
+	RadioTapPresentMCS
+	RadioTapPresentAMPDUStatus
+	RadioTapPresentVHT
+	RadioTapPresentEXT RadioTapPresent = 1 << 31
+)
+
+func (r RadioTapPresent) TSFT() bool {
+	return r&RadioTapPresentTSFT != 0
+}
+func (r RadioTapPresent) Flags() bool {
+	return r&RadioTapPresentFlags != 0
+}
+func (r RadioTapPresent) Rate() bool {
+	return r&RadioTapPresentRate != 0
+}
+func (r RadioTapPresent) Channel() bool {
+	return r&RadioTapPresentChannel != 0
+}
+func (r RadioTapPresent) FHSS() bool {
+	return r&RadioTapPresentFHSS != 0
+}
+func (r RadioTapPresent) DBMAntennaSignal() bool {
+	return r&RadioTapPresentDBMAntennaSignal != 0
+}
+func (r RadioTapPresent) DBMAntennaNoise() bool {
+	return r&RadioTapPresentDBMAntennaNoise != 0
+}
+func (r RadioTapPresent) LockQuality() bool {
+	return r&RadioTapPresentLockQuality != 0
+}
+func (r RadioTapPresent) TxAttenuation() bool {
+	return r&RadioTapPresentTxAttenuation != 0
+}
+func (r RadioTapPresent) DBTxAttenuation() bool {
+	return r&RadioTapPresentDBTxAttenuation != 0
+}
+func (r RadioTapPresent) DBMTxPower() bool {
+	return r&RadioTapPresentDBMTxPower != 0
+}
+func (r RadioTapPresent) Antenna() bool {
+	return r&RadioTapPresentAntenna != 0
+}
+func (r RadioTapPresent) DBAntennaSignal() bool {
+	return r&RadioTapPresentDBAntennaSignal != 0
+}
+func (r RadioTapPresent) DBAntennaNoise() bool {
+	return r&RadioTapPresentDBAntennaNoise != 0
+}
+func (r RadioTapPresent) RxFlags() bool {
+	return r&RadioTapPresentRxFlags != 0
+}
+func (r RadioTapPresent) TxFlags() bool {
+	return r&RadioTapPresentTxFlags != 0
+}
+func (r RadioTapPresent) RtsRetries() bool {
+	return r&RadioTapPresentRtsRetries != 0
+}
+func (r RadioTapPresent) DataRetries() bool {
+	return r&RadioTapPresentDataRetries != 0
+}
+func (r RadioTapPresent) MCS() bool {
+	return r&RadioTapPresentMCS != 0
+}
+func (r RadioTapPresent) AMPDUStatus() bool {
+	return r&RadioTapPresentAMPDUStatus != 0
+}
+func (r RadioTapPresent) VHT() bool {
+	return r&RadioTapPresentVHT != 0
+}
+func (r RadioTapPresent) EXT() bool {
+	return r&RadioTapPresentEXT != 0
+}
+
+type RadioTapChannelFlags uint16
+
+const (
+	RadioTapChannelFlagsTurbo   RadioTapChannelFlags = 0x0010 // Turbo channel
+	RadioTapChannelFlagsCCK     RadioTapChannelFlags = 0x0020 // CCK channel
+	RadioTapChannelFlagsOFDM    RadioTapChannelFlags = 0x0040 // OFDM channel
+	RadioTapChannelFlagsGhz2    RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel.
+	RadioTapChannelFlagsGhz5    RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel
+	RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed
+	RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel
+	RadioTapChannelFlagsGFSK    RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY)
+)
+
+func (r RadioTapChannelFlags) Turbo() bool {
+	return r&RadioTapChannelFlagsTurbo != 0
+}
+func (r RadioTapChannelFlags) CCK() bool {
+	return r&RadioTapChannelFlagsCCK != 0
+}
+func (r RadioTapChannelFlags) OFDM() bool {
+	return r&RadioTapChannelFlagsOFDM != 0
+}
+func (r RadioTapChannelFlags) Ghz2() bool {
+	return r&RadioTapChannelFlagsGhz2 != 0
+}
+func (r RadioTapChannelFlags) Ghz5() bool {
+	return r&RadioTapChannelFlagsGhz5 != 0
+}
+func (r RadioTapChannelFlags) Passive() bool {
+	return r&RadioTapChannelFlagsPassive != 0
+}
+func (r RadioTapChannelFlags) Dynamic() bool {
+	return r&RadioTapChannelFlagsDynamic != 0
+}
+func (r RadioTapChannelFlags) GFSK() bool {
+	return r&RadioTapChannelFlagsGFSK != 0
+}
+
+// String provides a human readable string for RadioTapChannelFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapChannelFlags value, not its string.
+func (a RadioTapChannelFlags) String() string {
+	var out bytes.Buffer
+	if a.Turbo() {
+		out.WriteString("Turbo,")
+	}
+	if a.CCK() {
+		out.WriteString("CCK,")
+	}
+	if a.OFDM() {
+		out.WriteString("OFDM,")
+	}
+	if a.Ghz2() {
+		out.WriteString("Ghz2,")
+	}
+	if a.Ghz5() {
+		out.WriteString("Ghz5,")
+	}
+	if a.Passive() {
+		out.WriteString("Passive,")
+	}
+	if a.Dynamic() {
+		out.WriteString("Dynamic,")
+	}
+	if a.GFSK() {
+		out.WriteString("GFSK,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type RadioTapFlags uint8
+
+const (
+	RadioTapFlagsCFP           RadioTapFlags = 1 << iota // sent/received during CFP
+	RadioTapFlagsShortPreamble                           // sent/received * with short * preamble
+	RadioTapFlagsWEP                                     // sent/received * with WEP encryption
+	RadioTapFlagsFrag                                    // sent/received * with fragmentation
+	RadioTapFlagsFCS                                     // frame includes FCS
+	RadioTapFlagsDatapad                                 // frame has padding between * 802.11 header and payload * (to 32-bit boundary)
+	RadioTapFlagsBadFCS                                  // does not pass FCS check
+	RadioTapFlagsShortGI                                 // HT short GI
+)
+
+func (r RadioTapFlags) CFP() bool {
+	return r&RadioTapFlagsCFP != 0
+}
+func (r RadioTapFlags) ShortPreamble() bool {
+	return r&RadioTapFlagsShortPreamble != 0
+}
+func (r RadioTapFlags) WEP() bool {
+	return r&RadioTapFlagsWEP != 0
+}
+func (r RadioTapFlags) Frag() bool {
+	return r&RadioTapFlagsFrag != 0
+}
+func (r RadioTapFlags) FCS() bool {
+	return r&RadioTapFlagsFCS != 0
+}
+func (r RadioTapFlags) Datapad() bool {
+	return r&RadioTapFlagsDatapad != 0
+}
+func (r RadioTapFlags) BadFCS() bool {
+	return r&RadioTapFlagsBadFCS != 0
+}
+func (r RadioTapFlags) ShortGI() bool {
+	return r&RadioTapFlagsShortGI != 0
+}
+
+// String provides a human readable string for RadioTapFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapFlags value, not its string.
+func (a RadioTapFlags) String() string {
+	var out bytes.Buffer
+	if a.CFP() {
+		out.WriteString("CFP,")
+	}
+	if a.ShortPreamble() {
+		out.WriteString("SHORT-PREAMBLE,")
+	}
+	if a.WEP() {
+		out.WriteString("WEP,")
+	}
+	if a.Frag() {
+		out.WriteString("FRAG,")
+	}
+	if a.FCS() {
+		out.WriteString("FCS,")
+	}
+	if a.Datapad() {
+		out.WriteString("DATAPAD,")
+	}
+	if a.ShortGI() {
+		out.WriteString("SHORT-GI,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type RadioTapRate uint8
+
+func (a RadioTapRate) String() string {
+	return fmt.Sprintf("%v Mb/s", 0.5*float32(a))
+}
+
+type RadioTapChannelFrequency uint16
+
+func (a RadioTapChannelFrequency) String() string {
+	return fmt.Sprintf("%d MHz", a)
+}
+
+type RadioTapRxFlags uint16
+
+const (
+	RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002
+)
+
+func (self RadioTapRxFlags) BadPlcp() bool {
+	return self&RadioTapRxFlagsBadPlcp != 0
+}
+
+func (self RadioTapRxFlags) String() string {
+	if self.BadPlcp() {
+		return "BADPLCP"
+	}
+	return ""
+}
+
+type RadioTapTxFlags uint16
+
+const (
+	RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota
+	RadioTapTxFlagsCTS
+	RadioTapTxFlagsRTS
+	RadioTapTxFlagsNoACK
+)
+
+func (self RadioTapTxFlags) Fail() bool  { return self&RadioTapTxFlagsFail != 0 }
+func (self RadioTapTxFlags) CTS() bool   { return self&RadioTapTxFlagsCTS != 0 }
+func (self RadioTapTxFlags) RTS() bool   { return self&RadioTapTxFlagsRTS != 0 }
+func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 }
+
+func (self RadioTapTxFlags) String() string {
+	var tokens []string
+	if self.Fail() {
+		tokens = append(tokens, "Fail")
+	}
+	if self.CTS() {
+		tokens = append(tokens, "CTS")
+	}
+	if self.RTS() {
+		tokens = append(tokens, "RTS")
+	}
+	if self.NoACK() {
+		tokens = append(tokens, "NoACK")
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapMCS struct {
+	Known RadioTapMCSKnown
+	Flags RadioTapMCSFlags
+	MCS   uint8
+}
+
+func (self RadioTapMCS) String() string {
+	var tokens []string
+	if self.Known.Bandwidth() {
+		token := "?"
+		switch self.Flags.Bandwidth() {
+		case 0:
+			token = "20"
+		case 1:
+			token = "40"
+		case 2:
+			token = "40(20L)"
+		case 3:
+			token = "40(20U)"
+		}
+		tokens = append(tokens, token)
+	}
+	if self.Known.MCSIndex() {
+		tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS))
+	}
+	if self.Known.GuardInterval() {
+		if self.Flags.ShortGI() {
+			tokens = append(tokens, "shortGI")
+		} else {
+			tokens = append(tokens, "longGI")
+		}
+	}
+	if self.Known.HTFormat() {
+		if self.Flags.Greenfield() {
+			tokens = append(tokens, "HT-greenfield")
+		} else {
+			tokens = append(tokens, "HT-mixed")
+		}
+	}
+	if self.Known.FECType() {
+		if self.Flags.FECLDPC() {
+			tokens = append(tokens, "LDPC")
+		} else {
+			tokens = append(tokens, "BCC")
+		}
+	}
+	if self.Known.STBC() {
+		tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC()))
+	}
+	if self.Known.NESS() {
+		num := 0
+		if self.Known.NESS1() {
+			num |= 0x02
+		}
+		if self.Flags.NESS0() {
+			num |= 0x01
+		}
+		tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapMCSKnown uint8
+
+const (
+	RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota
+	RadioTapMCSKnownMCSIndex
+	RadioTapMCSKnownGuardInterval
+	RadioTapMCSKnownHTFormat
+	RadioTapMCSKnownFECType
+	RadioTapMCSKnownSTBC
+	RadioTapMCSKnownNESS
+	RadioTapMCSKnownNESS1
+)
+
+func (self RadioTapMCSKnown) Bandwidth() bool     { return self&RadioTapMCSKnownBandwidth != 0 }
+func (self RadioTapMCSKnown) MCSIndex() bool      { return self&RadioTapMCSKnownMCSIndex != 0 }
+func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 }
+func (self RadioTapMCSKnown) HTFormat() bool      { return self&RadioTapMCSKnownHTFormat != 0 }
+func (self RadioTapMCSKnown) FECType() bool       { return self&RadioTapMCSKnownFECType != 0 }
+func (self RadioTapMCSKnown) STBC() bool          { return self&RadioTapMCSKnownSTBC != 0 }
+func (self RadioTapMCSKnown) NESS() bool          { return self&RadioTapMCSKnownNESS != 0 }
+func (self RadioTapMCSKnown) NESS1() bool         { return self&RadioTapMCSKnownNESS1 != 0 }
+
+type RadioTapMCSFlags uint8
+
+const (
+	RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03
+	RadioTapMCSFlagsShortGI                        = 0x04
+	RadioTapMCSFlagsGreenfield                     = 0x08
+	RadioTapMCSFlagsFECLDPC                        = 0x10
+	RadioTapMCSFlagsSTBCMask                       = 0x60
+	RadioTapMCSFlagsNESS0                          = 0x80
+)
+
+func (self RadioTapMCSFlags) Bandwidth() int {
+	return int(self & RadioTapMCSFlagsBandwidthMask)
+}
+func (self RadioTapMCSFlags) ShortGI() bool    { return self&RadioTapMCSFlagsShortGI != 0 }
+func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 }
+func (self RadioTapMCSFlags) FECLDPC() bool    { return self&RadioTapMCSFlagsFECLDPC != 0 }
+func (self RadioTapMCSFlags) STBC() int {
+	return int(self&RadioTapMCSFlagsSTBCMask) >> 5
+}
+func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 }
+
+type RadioTapAMPDUStatus struct {
+	Reference uint32
+	Flags     RadioTapAMPDUStatusFlags
+	CRC       uint8
+}
+
+func (self RadioTapAMPDUStatus) String() string {
+	tokens := []string{
+		fmt.Sprintf("ref#%x", self.Reference),
+	}
+	if self.Flags.ReportZerolen() && self.Flags.IsZerolen() {
+		tokens = append(tokens, "zero-length")
+	}
+	if self.Flags.LastKnown() && self.Flags.IsLast() {
+		tokens = append(tokens, "last")
+	}
+	if self.Flags.DelimCRCErr() {
+		tokens = append(tokens, "delimiter CRC error")
+	}
+	if self.Flags.DelimCRCKnown() {
+		tokens = append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapAMPDUStatusFlags uint16
+
+const (
+	RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota
+	RadioTapAMPDUIsZerolen
+	RadioTapAMPDULastKnown
+	RadioTapAMPDUIsLast
+	RadioTapAMPDUDelimCRCErr
+	RadioTapAMPDUDelimCRCKnown
+)
+
+func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool {
+	return self&RadioTapAMPDUStatusFlagsReportZerolen != 0
+}
+func (self RadioTapAMPDUStatusFlags) IsZerolen() bool     { return self&RadioTapAMPDUIsZerolen != 0 }
+func (self RadioTapAMPDUStatusFlags) LastKnown() bool     { return self&RadioTapAMPDULastKnown != 0 }
+func (self RadioTapAMPDUStatusFlags) IsLast() bool        { return self&RadioTapAMPDUIsLast != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool   { return self&RadioTapAMPDUDelimCRCErr != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool { return self&RadioTapAMPDUDelimCRCKnown != 0 }
+
+type RadioTapVHT struct {
+	Known      RadioTapVHTKnown
+	Flags      RadioTapVHTFlags
+	Bandwidth  uint8
+	MCSNSS     [4]RadioTapVHTMCSNSS
+	Coding     uint8
+	GroupId    uint8
+	PartialAID uint16
+}
+
+func (self RadioTapVHT) String() string {
+	var tokens []string
+	if self.Known.STBC() {
+		if self.Flags.STBC() {
+			tokens = append(tokens, "STBC")
+		} else {
+			tokens = append(tokens, "no STBC")
+		}
+	}
+	if self.Known.TXOPPSNotAllowed() {
+		if self.Flags.TXOPPSNotAllowed() {
+			tokens = append(tokens, "TXOP doze not allowed")
+		} else {
+			tokens = append(tokens, "TXOP doze allowed")
+		}
+	}
+	if self.Known.GI() {
+		if self.Flags.SGI() {
+			tokens = append(tokens, "short GI")
+		} else {
+			tokens = append(tokens, "long GI")
+		}
+	}
+	if self.Known.SGINSYMDisambiguation() {
+		if self.Flags.SGINSYMMod() {
+			tokens = append(tokens, "NSYM mod 10=9")
+		} else {
+			tokens = append(tokens, "NSYM mod 10!=9 or no short GI")
+		}
+	}
+	if self.Known.LDPCExtraOFDMSymbol() {
+		if self.Flags.LDPCExtraOFDMSymbol() {
+			tokens = append(tokens, "LDPC extra OFDM symbols")
+		} else {
+			tokens = append(tokens, "no LDPC extra OFDM symbols")
+		}
+	}
+	if self.Known.Beamformed() {
+		if self.Flags.Beamformed() {
+			tokens = append(tokens, "beamformed")
+		} else {
+			tokens = append(tokens, "no beamformed")
+		}
+	}
+	if self.Known.Bandwidth() {
+		token := "?"
+		switch self.Bandwidth & 0x1f {
+		case 0:
+			token = "20"
+		case 1:
+			token = "40"
+		case 2:
+			token = "40(20L)"
+		case 3:
+			token = "40(20U)"
+		case 4:
+			token = "80"
+		case 5:
+			token = "80(40L)"
+		case 6:
+			token = "80(40U)"
+		case 7:
+			token = "80(20LL)"
+		case 8:
+			token = "80(20LU)"
+		case 9:
+			token = "80(20UL)"
+		case 10:
+			token = "80(20UU)"
+		case 11:
+			token = "160"
+		case 12:
+			token = "160(80L)"
+		case 13:
+			token = "160(80U)"
+		case 14:
+			token = "160(40LL)"
+		case 15:
+			token = "160(40LU)"
+		case 16:
+			token = "160(40UL)"
+		case 17:
+			token = "160(40UU)"
+		case 18:
+			token = "160(20LLL)"
+		case 19:
+			token = "160(20LLU)"
+		case 20:
+			token = "160(20LUL)"
+		case 21:
+			token = "160(20LUU)"
+		case 22:
+			token = "160(20ULL)"
+		case 23:
+			token = "160(20ULU)"
+		case 24:
+			token = "160(20UUL)"
+		case 25:
+			token = "160(20UUU)"
+		}
+		tokens = append(tokens, token)
+	}
+	for i, MCSNSS := range self.MCSNSS {
+		if MCSNSS.Present() {
+			fec := "?"
+			switch self.Coding & (1 << uint8(i)) {
+			case 0:
+				fec = "BCC"
+			case 1:
+				fec = "LDPC"
+			}
+			tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec))
+		}
+	}
+	if self.Known.GroupId() {
+		tokens = append(tokens,
+			fmt.Sprintf("group=%d", self.GroupId))
+	}
+	if self.Known.PartialAID() {
+		tokens = append(tokens,
+			fmt.Sprintf("partial-AID=%d", self.PartialAID))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapVHTKnown uint16
+
+const (
+	RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota
+	RadioTapVHTKnownTXOPPSNotAllowed
+	RadioTapVHTKnownGI
+	RadioTapVHTKnownSGINSYMDisambiguation
+	RadioTapVHTKnownLDPCExtraOFDMSymbol
+	RadioTapVHTKnownBeamformed
+	RadioTapVHTKnownBandwidth
+	RadioTapVHTKnownGroupId
+	RadioTapVHTKnownPartialAID
+)
+
+func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 }
+func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool {
+	return self&RadioTapVHTKnownTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 }
+func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool {
+	return self&RadioTapVHTKnownSGINSYMDisambiguation != 0
+}
+func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool {
+	return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 }
+func (self RadioTapVHTKnown) Bandwidth() bool  { return self&RadioTapVHTKnownBandwidth != 0 }
+func (self RadioTapVHTKnown) GroupId() bool    { return self&RadioTapVHTKnownGroupId != 0 }
+func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 }
+
+type RadioTapVHTFlags uint8
+
+const (
+	RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota
+	RadioTapVHTFlagsTXOPPSNotAllowed
+	RadioTapVHTFlagsSGI
+	RadioTapVHTFlagsSGINSYMMod
+	RadioTapVHTFlagsLDPCExtraOFDMSymbol
+	RadioTapVHTFlagsBeamformed
+)
+
+func (self RadioTapVHTFlags) STBC() bool { return self&RadioTapVHTFlagsSTBC != 0 }
+func (self RadioTapVHTFlags) TXOPPSNotAllowed() bool {
+	return self&RadioTapVHTFlagsTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTFlags) SGI() bool        { return self&RadioTapVHTFlagsSGI != 0 }
+func (self RadioTapVHTFlags) SGINSYMMod() bool { return self&RadioTapVHTFlagsSGINSYMMod != 0 }
+func (self RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool {
+	return self&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTFlags) Beamformed() bool { return self&RadioTapVHTFlagsBeamformed != 0 }
+
+type RadioTapVHTMCSNSS uint8
+
+func (self RadioTapVHTMCSNSS) Present() bool {
+	return self&0x0F != 0
+}
+
+func (self RadioTapVHTMCSNSS) String() string {
+	return fmt.Sprintf("NSS#%dMCS#%d", uint32(self&0xf), uint32(self>>4))
+}
+
+func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error {
+	d := &RadioTap{}
+	// TODO: Should we set LinkLayer here? And implement LinkFlow
+	return decodingLayerDecoder(d, data, p)
+}
+
+type RadioTap struct {
+	BaseLayer
+
+	// Version 0. Only increases for drastic changes, introduction of compatible new fields does not count.
+	Version uint8
+	// Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields.
+	Length uint16
+	// Present is a bitmap telling which fields are present. Set bit 31 (0x80000000) to extend the bitmap by another 32 bits; each further extension word chains the same way.
+	Present RadioTapPresent
+	// TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames, only.
+	TSFT  uint64
+	Flags RadioTapFlags
+	// Rate Tx/Rx data rate
+	Rate RadioTapRate
+	// ChannelFrequency Tx/Rx frequency in MHz, followed by flags
+	ChannelFrequency RadioTapChannelFrequency
+	ChannelFlags     RadioTapChannelFlags
+	// FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte).
+	FHSS uint16
+	// DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt.
+	DBMAntennaSignal int8
+	// DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt.
+	DBMAntennaNoise int8
+	// LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets.
+	LockQuality uint16
+	// TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration.  0 is max power. Monotonically nondecreasing with lower power levels.
+	TxAttenuation uint16
+	// DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration.  0 is max power.  Monotonically nondecreasing with lower power levels.
+	DBTxAttenuation uint16
+	// DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port.
+	DBMTxPower int8
+	// Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0.
+	Antenna uint8
+	// DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference.
+	DBAntennaSignal uint8
+	// DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point.
+	DBAntennaNoise uint8
+	//
+	RxFlags     RadioTapRxFlags
+	TxFlags     RadioTapTxFlags
+	RtsRetries  uint8
+	DataRetries uint8
+	MCS         RadioTapMCS
+	AMPDUStatus RadioTapAMPDUStatus
+	VHT         RadioTapVHT
+}
+
+func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap }
+
+func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Version = uint8(data[0])
+	m.Length = binary.LittleEndian.Uint16(data[2:4])
+	m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8]))
+
+	offset := uint16(4)
+
+	for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 {
+		// This parser only handles standard radiotap namespace,
+		// and expects all fields are packed in the first it_present.
+		// Extended bitmap will be just ignored.
+		offset += 4
+	}
+	offset += 4 // skip the bitmap
+
+	if m.Present.TSFT() {
+		offset += align(offset, 8)
+		m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8])
+		offset += 8
+	}
+	if m.Present.Flags() {
+		m.Flags = RadioTapFlags(data[offset])
+		offset++
+	}
+	if m.Present.Rate() {
+		m.Rate = RadioTapRate(data[offset])
+		offset++
+	}
+	if m.Present.Channel() {
+		offset += align(offset, 2)
+		m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2]))
+		offset += 2
+		m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2]))
+		offset += 2
+	}
+	if m.Present.FHSS() {
+		m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBMAntennaSignal() {
+		m.DBMAntennaSignal = int8(data[offset])
+		offset++
+	}
+	if m.Present.DBMAntennaNoise() {
+		m.DBMAntennaNoise = int8(data[offset])
+		offset++
+	}
+	if m.Present.LockQuality() {
+		offset += align(offset, 2)
+		m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.TxAttenuation() {
+		offset += align(offset, 2)
+		m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBTxAttenuation() {
+		offset += align(offset, 2)
+		m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBMTxPower() {
+		m.DBMTxPower = int8(data[offset])
+		offset++
+	}
+	if m.Present.Antenna() {
+		m.Antenna = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DBAntennaSignal() {
+		m.DBAntennaSignal = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DBAntennaNoise() {
+		m.DBAntennaNoise = uint8(data[offset])
+		offset++
+	}
+	if m.Present.RxFlags() {
+		offset += align(offset, 2)
+		m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:]))
+		offset += 2
+	}
+	if m.Present.TxFlags() {
+		offset += align(offset, 2)
+		m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:]))
+		offset += 2
+	}
+	if m.Present.RtsRetries() {
+		m.RtsRetries = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DataRetries() {
+		m.DataRetries = uint8(data[offset])
+		offset++
+	}
+	if m.Present.MCS() {
+		m.MCS = RadioTapMCS{
+			RadioTapMCSKnown(data[offset]),
+			RadioTapMCSFlags(data[offset+1]),
+			uint8(data[offset+2]),
+		}
+		offset += 3
+	}
+	if m.Present.AMPDUStatus() {
+		offset += align(offset, 4)
+		m.AMPDUStatus = RadioTapAMPDUStatus{
+			Reference: binary.LittleEndian.Uint32(data[offset:]),
+			Flags:     RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])),
+			CRC:       uint8(data[offset+6]),
+		}
+		offset += 8
+	}
+	if m.Present.VHT() {
+		offset += align(offset, 2)
+		m.VHT = RadioTapVHT{
+			Known:     RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])),
+			Flags:     RadioTapVHTFlags(data[offset+2]),
+			Bandwidth: uint8(data[offset+3]),
+			MCSNSS: [4]RadioTapVHTMCSNSS{
+				RadioTapVHTMCSNSS(data[offset+4]),
+				RadioTapVHTMCSNSS(data[offset+5]),
+				RadioTapVHTMCSNSS(data[offset+6]),
+				RadioTapVHTMCSNSS(data[offset+7]),
+			},
+			Coding:     uint8(data[offset+8]),
+			GroupId:    uint8(data[offset+9]),
+			PartialAID: binary.LittleEndian.Uint16(data[offset+10:]),
+		}
+		offset += 12
+	}
+
+	payload := data[m.Length:]
+
+	// Remove non standard padding used by some Wi-Fi drivers
+	if m.Flags.Datapad() &&
+		payload[0]&0xC == 0x8 { // Data frame
+		headlen := 24
+		if payload[0]&0x8C == 0x88 { // QoS
+			headlen += 2
+		}
+		if payload[1]&0x3 == 0x3 { // 4 addresses
+			headlen += 2
+		}
+		if headlen%4 == 2 {
+			payload = append(payload[:headlen], payload[headlen+2:len(payload)]...)
+		}
+	}
+
+	if !m.Flags.FCS() {
+		// Dot11.DecodeFromBytes() expects the FCS to be present and performs a hard chop on the checksum.
+		// If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+		// may extend beyond the len. Rather than expecting callers to enforce cap==len on every packet,
+		// we take the hit in this one case and do a reallocation.  If the user DOES enforce cap==len,
+		// then the reallocation will happen anyway on the append.  This is required because the append
+		// writes to the memory directly after the payload if there is sufficient capacity, which callers
+		// may not expect.
+		reallocPayload := make([]byte, len(payload)+4)
+		copy(reallocPayload[0:len(payload)], payload)
+		h := crc32.NewIEEE()
+		h.Write(payload)
+		binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+		payload = reallocPayload
+	}
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+	return nil
+}
+
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf := make([]byte, 1024)
+
+	buf[0] = m.Version
+	buf[1] = 0
+
+	binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+	offset := uint16(4)
+
+	for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+		offset += 4
+	}
+
+	offset += 4
+
+	if m.Present.TSFT() {
+		offset += align(offset, 8)
+		binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+		offset += 8
+	}
+
+	if m.Present.Flags() {
+		buf[offset] = uint8(m.Flags)
+		offset++
+	}
+
+	if m.Present.Rate() {
+		buf[offset] = uint8(m.Rate)
+		offset++
+	}
+
+	if m.Present.Channel() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+		offset += 2
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+		offset += 2
+	}
+
+	if m.Present.FHSS() {
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+		offset += 2
+	}
+
+	if m.Present.DBMAntennaSignal() {
+		buf[offset] = byte(m.DBMAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBMAntennaNoise() {
+		buf[offset] = byte(m.DBMAntennaNoise)
+		offset++
+	}
+
+	if m.Present.LockQuality() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+		offset += 2
+	}
+
+	if m.Present.TxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBTxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBMTxPower() {
+		buf[offset] = byte(m.DBMTxPower)
+		offset++
+	}
+
+	if m.Present.Antenna() {
+		buf[offset] = uint8(m.Antenna)
+		offset++
+	}
+
+	if m.Present.DBAntennaSignal() {
+		buf[offset] = uint8(m.DBAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBAntennaNoise() {
+		buf[offset] = uint8(m.DBAntennaNoise)
+		offset++
+	}
+
+	if m.Present.RxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+		offset += 2
+	}
+
+	if m.Present.TxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+		offset += 2
+	}
+
+	if m.Present.RtsRetries() {
+		buf[offset] = m.RtsRetries
+		offset++
+	}
+
+	if m.Present.DataRetries() {
+		buf[offset] = m.DataRetries
+		offset++
+	}
+
+	if m.Present.MCS() {
+		buf[offset] = uint8(m.MCS.Known)
+		buf[offset+1] = uint8(m.MCS.Flags)
+		buf[offset+2] = uint8(m.MCS.MCS)
+
+		offset += 3
+	}
+
+	if m.Present.AMPDUStatus() {
+		offset += align(offset, 4)
+
+		binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference)
+		binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags))
+
+		buf[offset+6] = m.AMPDUStatus.CRC
+
+		offset += 8
+	}
+
+	if m.Present.VHT() {
+		offset += align(offset, 2)
+
+		binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known))
+
+		buf[offset+2] = uint8(m.VHT.Flags)
+		buf[offset+3] = uint8(m.VHT.Bandwidth)
+		buf[offset+4] = uint8(m.VHT.MCSNSS[0])
+		buf[offset+5] = uint8(m.VHT.MCSNSS[1])
+		buf[offset+6] = uint8(m.VHT.MCSNSS[2])
+		buf[offset+7] = uint8(m.VHT.MCSNSS[3])
+		buf[offset+8] = uint8(m.VHT.Coding)
+		buf[offset+9] = uint8(m.VHT.GroupId)
+
+		binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID)
+
+		offset += 12
+	}
+
+	packetBuf, err := b.PrependBytes(int(offset))
+
+	if err != nil {
+		return err
+	}
+
+	if opts.FixLengths {
+		m.Length = offset
+	}
+
+	binary.LittleEndian.PutUint16(buf[2:4], m.Length)
+
+	copy(packetBuf, buf)
+
+	return nil
+}
+
+func (m *RadioTap) CanDecode() gopacket.LayerClass    { return LayerTypeRadioTap }
+func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
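+
+// A minimal, illustrative decoding sketch (assuming raw holds a complete
+// RadioTap + 802.11 frame captured from a monitor-mode interface):
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeRadioTap, gopacket.Default)
+//	if rt, ok := packet.Layer(LayerTypeRadioTap).(*RadioTap); ok {
+//		fmt.Printf("rate=%v signal=%d dBm freq=%d MHz\n",
+//			rt.Rate, rt.DBMAntennaSignal, rt.ChannelFrequency)
+//	}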
diff --git a/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/google/gopacket/layers/rudp.go
new file mode 100644
index 0000000..8435129
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rudp.go
@@ -0,0 +1,93 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+type RUDP struct {
+	BaseLayer
+	SYN, ACK, EACK, RST, NUL bool
+	Version                  uint8
+	HeaderLength             uint8
+	SrcPort, DstPort         RUDPPort
+	DataLength               uint16
+	Seq, Ack, Checksum       uint32
+	VariableHeaderArea       []byte
+	// RUDPHeaderSYN contains SYN information for the RUDP packet,
+	// if the SYN flag is set.
+	*RUDPHeaderSYN
+	// RUDPHeaderEACK contains EACK information for the RUDP packet,
+	// if the EACK flag is set.
+	*RUDPHeaderEACK
+}
+
+type RUDPHeaderSYN struct {
+	MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16
+}
+
+type RUDPHeaderEACK struct {
+	SeqsReceivedOK []uint32
+}
+
+// LayerType returns gopacket.LayerTypeRUDP.
+func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP }
+
+func decodeRUDP(data []byte, p gopacket.PacketBuilder) error {
+	r := &RUDP{
+		SYN:          data[0]&0x80 != 0,
+		ACK:          data[0]&0x40 != 0,
+		EACK:         data[0]&0x20 != 0,
+		RST:          data[0]&0x10 != 0,
+		NUL:          data[0]&0x08 != 0,
+		Version:      data[0] & 0x3,
+		HeaderLength: data[1],
+		SrcPort:      RUDPPort(data[2]),
+		DstPort:      RUDPPort(data[3]),
+		DataLength:   binary.BigEndian.Uint16(data[4:6]),
+		Seq:          binary.BigEndian.Uint32(data[6:10]),
+		Ack:          binary.BigEndian.Uint32(data[10:14]),
+		Checksum:     binary.BigEndian.Uint32(data[14:18]),
+	}
+	if r.HeaderLength < 9 {
+		return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength)
+	}
+	hlen := int(r.HeaderLength) * 2
+	r.Contents = data[:hlen]
+	r.Payload = data[hlen : hlen+int(r.DataLength)]
+	r.VariableHeaderArea = data[18:hlen]
+	headerData := r.VariableHeaderArea
+	switch {
+	case r.SYN:
+		if len(headerData) != 6 {
+			return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData))
+		}
+		r.RUDPHeaderSYN = &RUDPHeaderSYN{
+			MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]),
+			MaxSegmentSize:         binary.BigEndian.Uint16(headerData[2:4]),
+			OptionFlags:            binary.BigEndian.Uint16(headerData[4:6]),
+		}
+	case r.EACK:
+		if len(headerData)%4 != 0 {
+			return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData))
+		}
+		r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)}
+		for i := 0; i < len(headerData); i += 4 {
+			r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4])
+		}
+	}
+	p.AddLayer(r)
+	p.SetTransportLayer(r)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (r *RUDP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)})
+}
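+
+// A minimal, illustrative decoding sketch (assuming raw holds an RUDP packet
+// starting at the fixed header):
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeRUDP, gopacket.Default)
+//	if r, ok := packet.Layer(LayerTypeRUDP).(*RUDP); ok {
+//		fmt.Printf("RUDP %d -> %d seq=%d ack=%d\n", r.SrcPort, r.DstPort, r.Seq, r.Ack)
+//	}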
diff --git a/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/google/gopacket/layers/sctp.go
new file mode 100644
index 0000000..511176e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sctp.go
@@ -0,0 +1,746 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+
+	"github.com/google/gopacket"
+)
+
+// SCTP contains information on the top level of an SCTP packet.
+type SCTP struct {
+	BaseLayer
+	SrcPort, DstPort SCTPPort
+	VerificationTag  uint32
+	Checksum         uint32
+	sPort, dPort     []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTP
+func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP }
+
+func decodeSCTP(data []byte, p gopacket.PacketBuilder) error {
+	sctp := &SCTP{}
+	err := sctp.DecodeFromBytes(data, p)
+	p.AddLayer(sctp)
+	p.SetTransportLayer(sctp)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(sctpChunkTypePrefixDecoder)
+}
+
+var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)
+
+// TransportFlow returns a flow based on the source and destination SCTP port.
+func (s *SCTP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort)
+}
+
+func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error {
+	chunkType := SCTPChunkType(data[0])
+	return chunkType.Decode(data, p)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(12)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort))
+	binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag)
+	if opts.ComputeChecksums {
+		// Note:  MakeTable(Castagnoli) actually only creates the table once, then
+		// passes back a singleton on every other call, so this shouldn't cause
+		// excessive memory allocation.
+		binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli)))
+	}
+	return nil
+}
+
+func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		return errors.New("Invalid SCTP common header length")
+	}
+	sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2]))
+	sctp.sPort = data[:2]
+	sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4]))
+	sctp.dPort = data[2:4]
+	sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8])
+	sctp.Checksum = binary.BigEndian.Uint32(data[8:12])
+	sctp.BaseLayer = BaseLayer{data[:12], data[12:]}
+
+	return nil
+}
+
+func (t *SCTP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSCTP
+}
+
+func (t *SCTP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// SCTPChunk contains the common fields in all SCTP chunks.
+type SCTPChunk struct {
+	BaseLayer
+	Type   SCTPChunkType
+	Flags  uint8
+	Length uint16
+	// ActualLength is the total length of an SCTP chunk, including padding.
+	// SCTP chunks start and end on 4-byte boundaries.  So if a chunk has a length
+	// of 18, it means that it has data up to and including byte 18, then padding
+	// up to the next 4-byte boundary, 20.  In this case, Length would be 18, and
+	// ActualLength would be 20.
+	ActualLength int
+}
+
+func roundUpToNearest4(i int) int {
+	if i%4 == 0 {
+		return i
+	}
+	return i + 4 - (i % 4)
+}
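+
+// Illustrative values for the padding rule described above:
+//
+//	roundUpToNearest4(18) // == 20 (padded to the next 4-byte boundary)
+//	roundUpToNearest4(16) // == 16 (already aligned)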
+
+func decodeSCTPChunk(data []byte) (SCTPChunk, error) {
+	length := binary.BigEndian.Uint16(data[2:4])
+	if length < 4 {
+		return SCTPChunk{}, errors.New("invalid SCTP chunk length")
+	}
+	actual := roundUpToNearest4(int(length))
+	ct := SCTPChunkType(data[0])
+
+	// For SCTP Data, use a separate layer for the payload
+	delta := 0
+	if ct == SCTPChunkTypeData {
+		delta = int(actual) - int(length)
+		actual = 16
+	}
+
+	return SCTPChunk{
+		Type:         ct,
+		Flags:        data[1],
+		Length:       length,
+		ActualLength: actual,
+		BaseLayer:    BaseLayer{data[:actual], data[actual : len(data)-delta]},
+	}, nil
+}
+
+// SCTPParameter is a TLV parameter inside a SCTPChunk.
+type SCTPParameter struct {
+	Type         uint16
+	Length       uint16
+	ActualLength int
+	Value        []byte
+}
+
+func decodeSCTPParameter(data []byte) SCTPParameter {
+	length := binary.BigEndian.Uint16(data[2:4])
+	return SCTPParameter{
+		Type:         binary.BigEndian.Uint16(data[0:2]),
+		Length:       length,
+		Value:        data[4:length],
+		ActualLength: roundUpToNearest4(int(length)),
+	}
+}
+
+func (p SCTPParameter) Bytes() []byte {
+	length := 4 + len(p.Value)
+	data := make([]byte, roundUpToNearest4(length))
+	binary.BigEndian.PutUint16(data[0:2], p.Type)
+	binary.BigEndian.PutUint16(data[2:4], uint16(length))
+	copy(data[4:], p.Value)
+	return data
+}
+
+// SCTPUnknownChunkType is the layer type returned when we don't recognize the
+// chunk type.  Since there's a length in a known location, we can skip over
+// it even if we don't know what it is, and continue parsing the rest of the
+// chunks.  This chunk is stored as an ErrorLayer in the packet.
+type SCTPUnknownChunkType struct {
+	SCTPChunk
+	bytes []byte
+}
+
+func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPUnknownChunkType{SCTPChunk: chunk}
+	sc.bytes = data[:sc.ActualLength]
+	p.AddLayer(sc)
+	p.SetErrorLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(s.ActualLength)
+	if err != nil {
+		return err
+	}
+	copy(bytes, s.bytes)
+	return nil
+}
+
+// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
+func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }
+
+// Payload returns all bytes in this header, including the decoded Type, Length,
+// and Flags.
+func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }
+
+// Error implements ErrorLayer.
+func (s *SCTPUnknownChunkType) Error() error {
+	return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
+}
+
+// SCTPData is the SCTP Data chunk layer.
+type SCTPData struct {
+	SCTPChunk
+	Unordered, BeginFragment, EndFragment bool
+	TSN                                   uint32
+	StreamId                              uint16
+	StreamSequence                        uint16
+	PayloadProtocol                       SCTPPayloadProtocol
+}
+
+// LayerType returns gopacket.LayerTypeSCTPData.
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }
+
+// SCTPPayloadProtocol represents a payload protocol
+type SCTPPayloadProtocol uint32
+
+// SCTPPayloadProtocol constants from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml
+const (
+	SCTPProtocolReserved  SCTPPayloadProtocol = 0
+	SCTPPayloadUIA                            = 1
+	SCTPPayloadM2UA                           = 2
+	SCTPPayloadM3UA                           = 3
+	SCTPPayloadSUA                            = 4
+	SCTPPayloadM2PA                           = 5
+	SCTPPayloadV5UA                           = 6
+	SCTPPayloadH248                           = 7
+	SCTPPayloadBICC                           = 8
+	SCTPPayloadTALI                           = 9
+	SCTPPayloadDUA                            = 10
+	SCTPPayloadASAP                           = 11
+	SCTPPayloadENRP                           = 12
+	SCTPPayloadH323                           = 13
+	SCTPPayloadQIPC                           = 14
+	SCTPPayloadSIMCO                          = 15
+	SCTPPayloadDDPSegment                     = 16
+	SCTPPayloadDDPStream                      = 17
+	SCTPPayloadS1AP                           = 18
+)
+
+func (p SCTPPayloadProtocol) String() string {
+	switch p {
+	case SCTPProtocolReserved:
+		return "Reserved"
+	case SCTPPayloadUIA:
+		return "UIA"
+	case SCTPPayloadM2UA:
+		return "M2UA"
+	case SCTPPayloadM3UA:
+		return "M3UA"
+	case SCTPPayloadSUA:
+		return "SUA"
+	case SCTPPayloadM2PA:
+		return "M2PA"
+	case SCTPPayloadV5UA:
+		return "V5UA"
+	case SCTPPayloadH248:
+		return "H.248"
+	case SCTPPayloadBICC:
+		return "BICC"
+	case SCTPPayloadTALI:
+		return "TALI"
+	case SCTPPayloadDUA:
+		return "DUA"
+	case SCTPPayloadASAP:
+		return "ASAP"
+	case SCTPPayloadENRP:
+		return "ENRP"
+	case SCTPPayloadH323:
+		return "H.323"
+	case SCTPPayloadQIPC:
+		return "QIPC"
+	case SCTPPayloadSIMCO:
+		return "SIMCO"
+	case SCTPPayloadDDPSegment:
+		return "DDPSegment"
+	case SCTPPayloadDDPStream:
+		return "DDPStream"
+	case SCTPPayloadS1AP:
+		return "S1AP"
+	}
+	return fmt.Sprintf("Unknown(%d)", p)
+}
+
+func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPData{
+		SCTPChunk:       chunk,
+		Unordered:       data[1]&0x4 != 0,
+		BeginFragment:   data[1]&0x2 != 0,
+		EndFragment:     data[1]&0x1 != 0,
+		TSN:             binary.BigEndian.Uint32(data[4:8]),
+		StreamId:        binary.BigEndian.Uint16(data[8:10]),
+		StreamSequence:  binary.BigEndian.Uint16(data[10:12]),
+		PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])),
+	}
+	// Length is the length in bytes of the data, INCLUDING the 16-byte header.
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	payload := b.Bytes()
+	// Pad the payload to a 32 bit boundary
+	if rem := len(payload) % 4; rem != 0 {
+		b.AppendBytes(4 - rem)
+	}
+	length := 16
+	bytes, err := b.PrependBytes(length)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	flags := uint8(0)
+	if sc.Unordered {
+		flags |= 0x4
+	}
+	if sc.BeginFragment {
+		flags |= 0x2
+	}
+	if sc.EndFragment {
+		flags |= 0x1
+	}
+	bytes[1] = flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload)))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.TSN)
+	binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId)
+	binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence)
+	binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol))
+	return nil
+}
+
+// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet.
+type SCTPInitParameter SCTPParameter
+
+// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck
+// messages.
+type SCTPInit struct {
+	SCTPChunk
+	InitiateTag                     uint32
+	AdvertisedReceiverWindowCredit  uint32
+	OutboundStreams, InboundStreams uint16
+	InitialTSN                      uint32
+	Parameters                      []SCTPInitParameter
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck.
+func (sc *SCTPInit) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeInitAck {
+		return LayerTypeSCTPInitAck
+	}
+	// sc.Type == SCTPChunkTypeInit
+	return LayerTypeSCTPInit
+}
+
+func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPInit{
+		SCTPChunk:                      chunk,
+		InitiateTag:                    binary.BigEndian.Uint32(data[4:8]),
+		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+		OutboundStreams:                binary.BigEndian.Uint16(data[12:14]),
+		InboundStreams:                 binary.BigEndian.Uint16(data[14:16]),
+		InitialTSN:                     binary.BigEndian.Uint32(data[16:20]),
+	}
+	paramData := data[20:sc.ActualLength]
+	for len(paramData) > 0 {
+		p := SCTPInitParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 20 + len(payload)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
+	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+	binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
+	binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
+	binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
+	copy(bytes[20:], payload)
+	return nil
+}
+
+// SCTPSack is the SCTP Selective ACK chunk layer.
+type SCTPSack struct {
+	SCTPChunk
+	CumulativeTSNAck               uint32
+	AdvertisedReceiverWindowCredit uint32
+	NumGapACKs, NumDuplicateTSNs   uint16
+	GapACKs                        []uint16
+	DuplicateTSNs                  []uint32
+}
+
+// LayerType return LayerTypeSCTPSack
+func (sc *SCTPSack) LayerType() gopacket.LayerType {
+	return LayerTypeSCTPSack
+}
+
+func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPSack{
+		SCTPChunk:                      chunk,
+		CumulativeTSNAck:               binary.BigEndian.Uint32(data[4:8]),
+		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+		NumGapACKs:                     binary.BigEndian.Uint16(data[12:14]),
+		NumDuplicateTSNs:               binary.BigEndian.Uint16(data[14:16]),
+	}
+	// We bound gapAcks and dupTSNs here so we're not allocating tons
+	// of memory based on a user-controllable field.  Our maximums are not exact,
+	// but should give us sane defaults... we'll still hit slice boundaries and
+	// fail if the user-supplied values are too high (in the for loops below), but
+	// the amount of memory we'll have allocated because of that should be small
+	// (< sc.ActualLength).
+	gapAcks := sc.SCTPChunk.ActualLength / 2
+	dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
+	if gapAcks > int(sc.NumGapACKs) {
+		gapAcks = int(sc.NumGapACKs)
+	}
+	if dupTSNs > int(sc.NumDuplicateTSNs) {
+		dupTSNs = int(sc.NumDuplicateTSNs)
+	}
+	sc.GapACKs = make([]uint16, 0, gapAcks)
+	sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
+	bytesRemaining := data[16:]
+	for i := 0; i < int(sc.NumGapACKs); i++ {
+		sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
+		bytesRemaining = bytesRemaining[2:]
+	}
+	for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
+		sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
+		bytesRemaining = bytesRemaining[4:]
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+	binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
+	binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
+	for i, v := range sc.GapACKs {
+		binary.BigEndian.PutUint16(bytes[16+i*2:], v)
+	}
+	offset := 16 + 2*len(sc.GapACKs)
+	for i, v := range sc.DuplicateTSNs {
+		binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
+	}
+	return nil
+}
+
+// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
+// heartbeat ack layers.
+type SCTPHeartbeatParameter SCTPParameter
+
+// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat ack.
+type SCTPHeartbeat struct {
+	SCTPChunk
+	Parameters []SCTPHeartbeatParameter
+}
+
+// LayerType returns gopacket.LayerTypeSCTPHeartbeat.
+func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeHeartbeatAck {
+		return LayerTypeSCTPHeartbeatAck
+	}
+	// sc.Type == SCTPChunkTypeHeartbeat
+	return LayerTypeSCTPHeartbeat
+}
+
+func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPHeartbeat{
+		SCTPChunk: chunk,
+	}
+	paramData := data[4:sc.Length]
+	for len(paramData) > 0 {
+		p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 4 + len(payload)
+
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], payload)
+	return nil
+}
+
+// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
+type SCTPErrorParameter SCTPParameter
+
+// SCTPError is the SCTP error layer, also used for SCTP aborts.
+type SCTPError struct {
+	SCTPChunk
+	Parameters []SCTPErrorParameter
+}
+
+// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError.
+func (sc *SCTPError) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeAbort {
+		return LayerTypeSCTPAbort
+	}
+	// sc.Type == SCTPChunkTypeError
+	return LayerTypeSCTPError
+}
+
+func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
+	// remarkably similar to decodeSCTPHeartbeat ;)
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPError{
+		SCTPChunk: chunk,
+	}
+	paramData := data[4:sc.Length]
+	for len(paramData) > 0 {
+		p := SCTPErrorParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 4 + len(payload)
+
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], payload)
+	return nil
+}
+
+// SCTPShutdown is the SCTP shutdown layer.
+type SCTPShutdown struct {
+	SCTPChunk
+	CumulativeTSNAck uint32
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdown.
+func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
+
+func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPShutdown{
+		SCTPChunk:        chunk,
+		CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 8)
+	binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+	return nil
+}
+
+// SCTPShutdownAck is the SCTP shutdown layer.
+type SCTPShutdownAck struct {
+	SCTPChunk
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
+func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
+
+func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPShutdownAck{
+		SCTPChunk: chunk,
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 4)
+	return nil
+}
+
+// SCTPCookieEcho is the SCTP Cookie Echo layer.
+type SCTPCookieEcho struct {
+	SCTPChunk
+	Cookie []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
+func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
+
+func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPCookieEcho{
+		SCTPChunk: chunk,
+	}
+	sc.Cookie = data[4:sc.Length]
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := 4 + len(sc.Cookie)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], sc.Cookie)
+	return nil
+}
+
+// This struct is used by all empty SCTP chunks (currently CookieAck and
+// ShutdownComplete).
+type SCTPEmptyLayer struct {
+	SCTPChunk
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
+// LayerTypeSCTPCookieAck.
+func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeShutdownComplete {
+		return LayerTypeSCTPShutdownComplete
+	}
+	// sc.Type == SCTPChunkTypeCookieAck
+	return LayerTypeSCTPCookieAck
+}
+
+func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPEmptyLayer{
+		SCTPChunk: chunk,
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 4)
+	return nil
+}
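+
+// A minimal, illustrative decoding sketch (assuming raw holds an SCTP packet
+// starting at the common header); chunks decoded above appear as additional
+// layers on the packet:
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeSCTP, gopacket.Default)
+//	if s, ok := packet.Layer(LayerTypeSCTP).(*SCTP); ok {
+//		fmt.Printf("SCTP %d -> %d, tag %08x\n", s.SrcPort, s.DstPort, s.VerificationTag)
+//	}
+//	if d, ok := packet.Layer(LayerTypeSCTPData).(*SCTPData); ok {
+//		fmt.Printf("DATA TSN %d stream %d proto %v\n", d.TSN, d.StreamId, d.PayloadProtocol)
+//	}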
diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go
new file mode 100644
index 0000000..c56fe89
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sflow.go
@@ -0,0 +1,2480 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+This layer decodes SFlow version 5 datagrams.
+
+The specification can be found here: http://sflow.org/sflow_version_5.txt
+
+Additional developer information about sflow can be found at:
+http://sflow.org/developers/specifications.php
+
+And SFlow in general:
+http://sflow.org/index.php
+
+Two forms of sample data are defined: compact and expanded. The
+Specification has this to say:
+
+    Compact and expand forms of counter and flow samples are defined.
+    An agent must not mix compact/expanded encodings.  If an agent
+    will never use ifIndex numbers >= 2^24 then it must use compact
+    encodings for all interfaces.  Otherwise the expanded formats must
+    be used for all interfaces.
+
+This decoder only supports the compact form, because that is the only
+one for which data was available.
+
+The datagram is composed of one or more samples of type flow or counter,
+and each sample is composed of one or more records describing the sample.
+A sample is a single instance of sampled information, and each record in
+the sample gives additional / supplementary information about the sample.
+
+The following sample record types are supported:
+
+	Raw Packet Header
+	opaque = flow_data; enterprise = 0; format = 1
+
+	Extended Switch Data
+	opaque = flow_data; enterprise = 0; format = 1001
+
+	Extended Router Data
+	opaque = flow_data; enterprise = 0; format = 1002
+
+	Extended Gateway Data
+	opaque = flow_data; enterprise = 0; format = 1003
+
+	Extended User Data
+	opaque = flow_data; enterprise = 0; format = 1004
+
+	Extended URL Data
+	opaque = flow_data; enterprise = 0; format = 1005
+
+The following types of counter records are supported:
+
+	Generic Interface Counters - see RFC 2233
+	opaque = counter_data; enterprise = 0; format = 1
+
+	Ethernet Interface Counters - see RFC 2358
+	opaque = counter_data; enterprise = 0; format = 2
+
+SFlow is encoded using XDR (RFC4506). There are a few places
+where the standard 4-byte fields are partitioned into two
+bitfields of different lengths. I'm not sure why the designers
+chose to pack together two values like this in some places, and
+in others they use the entire 4-byte value to store a number that
+will never be more than a few bits. In any case, there are a couple
+of types defined to handle the decoding of these bitfields, and
+that's why they're there. */
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// SFlowRecord holds both flow sample records and counter sample records.
+// A Record is the structure that actually holds the sampled data
+// and / or counters.
+type SFlowRecord interface {
+}
+
+// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant
+// 2 bits, and an SFlowSourceValue in its least significant 30 bits.
+// These types and values define the meaning of the interface information
+// presented in the sample metadata.
+type SFlowDataSource int32
+
+func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) {
+	leftField := sdc >> 30
+	rightField := uint32(0x3FFFFFFF) & uint32(sdc)
+	return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
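+
+// For example (illustrative): an SFlowDataSource of 0x40000001 decodes to
+// SFlowSourceFormat 1 (Packet Discarded) and SFlowSourceValue 1, since the
+// top 2 bits carry the format and the remaining 30 bits carry the value.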
+
+type SFlowDataSourceExpanded struct {
+	SourceIDClass SFlowSourceFormat
+	SourceIDIndex SFlowSourceValue
+}
+
+func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) {
+	leftField := sdce.SourceIDClass >> 30
+	rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex)
+	return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
+type SFlowSourceFormat uint32
+
+type SFlowSourceValue uint32
+
+const (
+	SFlowTypeSingleInterface      SFlowSourceFormat = 0
+	SFlowTypePacketDiscarded      SFlowSourceFormat = 1
+	SFlowTypeMultipleDestinations SFlowSourceFormat = 2
+)
+
+func (sdf SFlowSourceFormat) String() string {
+	switch sdf {
+	case SFlowTypeSingleInterface:
+		return "Single Interface"
+	case SFlowTypePacketDiscarded:
+		return "Packet Discarded"
+	case SFlowTypeMultipleDestinations:
+		return "Multiple Destinations"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+func decodeSFlow(data []byte, p gopacket.PacketBuilder) error {
+	s := &SFlowDatagram{}
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	p.SetApplicationLayer(s)
+	return nil
+}
+
+// SFlowDatagram is the outermost container which holds some basic information
+// about the reporting agent, and holds at least one sample record
+type SFlowDatagram struct {
+	BaseLayer
+
+	DatagramVersion uint32
+	AgentAddress    net.IP
+	SubAgentID      uint32
+	SequenceNumber  uint32
+	AgentUptime     uint32
+	SampleCount     uint32
+	FlowSamples     []SFlowFlowSample
+	CounterSamples  []SFlowCounterSample
+}
+
+// An SFlow datagram's outer container has the following
+// structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int sFlow version (2|4|5)           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   int IP version of the Agent (1=v4|2=v6)     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /    Agent IP address (v4=4byte|v6=16byte)      /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sub agent id                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |         int datagram sequence number          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |            int switch uptime in ms            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int n samples in datagram            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                  n samples                    /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowDataFormat encodes the EnterpriseID in the most
+// significant 20 bits, and the SampleType in the least significant
+// 12 bits.
+type SFlowDataFormat uint32
+
+func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) {
+	leftField := sdf >> 12
+	rightField := uint32(0xFFF) & uint32(sdf)
+	return SFlowEnterpriseID(leftField), SFlowSampleType(rightField)
+}
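+
+// For example (illustrative): an SFlowDataFormat of 0x00000001 decodes to
+// enterprise 0 (standard SFlow) and sample type 1 (compact flow sample),
+// while 0x00000003 decodes to enterprise 0 and sample type 3 (expanded
+// flow sample).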
+
+// SFlowEnterpriseID is used to differentiate between the
+// official SFlow standard, and other, vendor-specific
+// types of flow data. (Similar to SNMP's enterprise MIB
+// OIDs.) Only the official SFlow Enterprise ID is decoded
+// here.
+type SFlowEnterpriseID uint32
+
+const (
+	SFlowStandard SFlowEnterpriseID = 0
+)
+
+func (eid SFlowEnterpriseID) String() string {
+	switch eid {
+	case SFlowStandard:
+		return "Standard SFlow"
+	default:
+		return ""
+	}
+}
+
+func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID {
+	return SFlowStandard
+}
+
+// SFlowSampleType specifies the type of sample. Only flow samples
+// and counter samples are supported
+type SFlowSampleType uint32
+
+const (
+	SFlowTypeFlowSample            SFlowSampleType = 1
+	SFlowTypeCounterSample         SFlowSampleType = 2
+	SFlowTypeExpandedFlowSample    SFlowSampleType = 3
+	SFlowTypeExpandedCounterSample SFlowSampleType = 4
+)
+
+func (st SFlowSampleType) GetType() SFlowSampleType {
+	switch st {
+	case SFlowTypeFlowSample:
+		return SFlowTypeFlowSample
+	case SFlowTypeCounterSample:
+		return SFlowTypeCounterSample
+	case SFlowTypeExpandedFlowSample:
+		return SFlowTypeExpandedFlowSample
+	case SFlowTypeExpandedCounterSample:
+		return SFlowTypeExpandedCounterSample
+	default:
+		panic("Invalid Sample Type")
+	}
+}
+
+func (st SFlowSampleType) String() string {
+	switch st {
+	case SFlowTypeFlowSample:
+		return "Flow Sample"
+	case SFlowTypeCounterSample:
+		return "Counter Sample"
+	case SFlowTypeExpandedFlowSample:
+		return "Expanded Flow Sample"
+	case SFlowTypeExpandedCounterSample:
+		return "Expanded Counter Sample"
+	default:
+		return ""
+	}
+}
+
+func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) Payload() []byte { return nil }
+
+func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+// SFlowIPType determines what form the IP address being decoded will
+// take. This is an XDR union type allowing for both IPv4 and IPv6
+type SFlowIPType uint32
+
+const (
+	SFlowIPv4 SFlowIPType = 1
+	SFlowIPv6 SFlowIPType = 2
+)
+
+func (s SFlowIPType) String() string {
+	switch s {
+	case SFlowIPv4:
+		return "IPv4"
+	case SFlowIPv6:
+		return "IPv6"
+	default:
+		return ""
+	}
+}
+
+func (s SFlowIPType) Length() int {
+	switch s {
+	case SFlowIPv4:
+		return 4
+	case SFlowIPv6:
+		return 16
+	default:
+		return 0
+	}
+}
+
+func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var agentAddressType SFlowIPType
+
+	data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4]))
+	data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()]
+	data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4])
+
+	if s.SampleCount < 1 {
+		return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount)
+	}
+	for i := uint32(0); i < s.SampleCount; i++ {
+		sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4]))
+		_, sampleType := sdf.decode()
+		switch sampleType {
+		case SFlowTypeFlowSample:
+			if flowSample, err := decodeFlowSample(&data, false); err == nil {
+				s.FlowSamples = append(s.FlowSamples, flowSample)
+			} else {
+				return err
+			}
+		case SFlowTypeCounterSample:
+			if counterSample, err := decodeCounterSample(&data, false); err == nil {
+				s.CounterSamples = append(s.CounterSamples, counterSample)
+			} else {
+				return err
+			}
+		case SFlowTypeExpandedFlowSample:
+			if flowSample, err := decodeFlowSample(&data, true); err == nil {
+				s.FlowSamples = append(s.FlowSamples, flowSample)
+			} else {
+				return err
+			}
+		case SFlowTypeExpandedCounterSample:
+			if counterSample, err := decodeCounterSample(&data, true); err == nil {
+				s.CounterSamples = append(s.CounterSamples, counterSample)
+			} else {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unsupported SFlow sample type %d", sampleType)
+		}
+	}
+	return nil
+}
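+
+// A minimal, illustrative decoding sketch (assuming udpPayload holds the UDP
+// payload of an sFlow version 5 datagram):
+//
+//	packet := gopacket.NewPacket(udpPayload, LayerTypeSFlow, gopacket.Default)
+//	if sf, ok := packet.Layer(LayerTypeSFlow).(*SFlowDatagram); ok {
+//		fmt.Printf("agent %v, %d samples\n", sf.AgentAddress, sf.SampleCount)
+//		for _, fs := range sf.FlowSamples {
+//			fmt.Printf("  flow sample: %d records\n", len(fs.Records))
+//		}
+//	}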
+
+// SFlowFlowSample represents a sampled packet and contains
+// one or more records describing the packet
+type SFlowFlowSample struct {
+	EnterpriseID          SFlowEnterpriseID
+	Format                SFlowSampleType
+	SampleLength          uint32
+	SequenceNumber        uint32
+	SourceIDClass         SFlowSourceFormat
+	SourceIDIndex         SFlowSourceValue
+	SamplingRate          uint32
+	SamplePool            uint32
+	Dropped               uint32
+	InputInterfaceFormat  uint32
+	InputInterface        uint32
+	OutputInterfaceFormat uint32
+	OutputInterface       uint32
+	RecordCount           uint32
+	Records               []SFlowRecord
+}
+
+// Flow samples have the following structure. Note
+// the bit fields to encode the Enterprise ID and the
+// Flow record format: type 1
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  sample length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |id type |       src id index value             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sampling rate               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int sample pool                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    int drops                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 int input ifIndex             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int output ifIndex             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   flow records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Flow samples have the following structure.
+// Flow record format: type 3
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  sample length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int src id type                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |             int src id index value            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sampling rate               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int sample pool                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    int drops                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int input interface format          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int input interface value           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int output interface format         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int output interface value          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   flow records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowFlowDataFormat uint32
+
+func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) {
+	leftField := fdf >> 12
+	rightField := uint32(0xFFF) & uint32(fdf)
+	return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField)
+}
+
+func (fs SFlowFlowSample) GetRecords() []SFlowRecord {
+	return fs.Records
+}
+
+func (fs SFlowFlowSample) GetType() SFlowSampleType {
+	return SFlowTypeFlowSample
+}
+
+func skipRecord(data *[]byte) {
+	recordLength := int(binary.BigEndian.Uint32((*data)[4:]))
+	*data = (*data)[(recordLength+((4-recordLength)%4))+8:]
+}
+
+func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) {
+	s := SFlowFlowSample{}
+	var sdf SFlowDataFormat
+	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	var sdc SFlowDataSource
+
+	s.EnterpriseID, s.Format = sdf.decode()
+	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if expanded {
+		*data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4]))
+		*data, s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4]))
+	} else {
+		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+	}
+	*data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	if expanded {
+		*data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		*data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	} else {
+		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	}
+	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	for i := uint32(0); i < s.RecordCount; i++ {
+		rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+		enterpriseID, flowRecordType := rdf.decode()
+
+		// Try to decode when the EnterpriseID is 0, signaling that the
+		// default sFlow structs are used according to the specification.
+		// Unexpected behavior has been observed otherwise, e.g. with pmacct.
+		if enterpriseID == 0 {
+			switch flowRecordType {
+			case SFlowTypeRawPacketFlow:
+				if record, err := decodeRawPacketFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedUserFlow:
+				if record, err := decodeExtendedUserFlow(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedUrlFlow:
+				if record, err := decodeExtendedURLRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedSwitchFlow:
+				if record, err := decodeExtendedSwitchFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedRouterFlow:
+				if record, err := decodeExtendedRouterFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedGatewayFlow:
+				if record, err := decodeExtendedGatewayFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeEthernetFrameFlow:
+				if record, err := decodeEthernetFrameFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeIpv4Flow:
+				if record, err := decodeSFlowIpv4Record(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeIpv6Flow:
+				if record, err := decodeSFlowIpv6Record(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedMlpsFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsFlow")
+			case SFlowTypeExtendedNatFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedNatFlow")
+			case SFlowTypeExtendedMlpsTunnelFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsTunnelFlow")
+			case SFlowTypeExtendedMlpsVcFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsVcFlow")
+			case SFlowTypeExtendedMlpsFecFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsFecFlow")
+			case SFlowTypeExtendedMlpsLvpFecFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow")
+			case SFlowTypeExtendedVlanFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedVlanFlow")
+			case SFlowTypeExtendedIpv4TunnelEgressFlow:
+				if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv4TunnelIngressFlow:
+				if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv6TunnelEgressFlow:
+				if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv6TunnelIngressFlow:
+				if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedDecapsulateEgressFlow:
+				if record, err := decodeExtendedDecapsulateEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedDecapsulateIngressFlow:
+				if record, err := decodeExtendedDecapsulateIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedVniEgressFlow:
+				if record, err := decodeExtendedVniEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedVniIngressFlow:
+				if record, err := decodeExtendedVniIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			default:
+				return s, fmt.Errorf("Unsupported flow record type: %d", flowRecordType)
+			}
+		} else {
+			skipRecord(data)
+		}
+	}
+	return s, nil
+}
+
+// Counter samples report information about various counter
+// objects. Typically these are items like IfInOctets, or
+// CPU / Memory stats, etc. SFlow will report these at regular
+// intervals as configured on the agent. If one were sufficiently
+// industrious, this could be used to replace the typical
+// SNMP polling used for such things.
+type SFlowCounterSample struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowSampleType
+	SampleLength   uint32
+	SequenceNumber uint32
+	SourceIDClass  SFlowSourceFormat
+	SourceIDIndex  SFlowSourceValue
+	RecordCount    uint32
+	Records        []SFlowRecord
+}
+
+// Counter samples have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |id type |       src id index value             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                counter records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowCounterDataFormat uint32
+
+func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) {
+	leftField := cdf >> 12
+	rightField := uint32(0xFFF) & uint32(cdf)
+	return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField)
+}
+
+// GetRecords will return a slice of interface types
+// representing records. A type switch can be used to
+// get at the underlying SFlowCounterRecordType.
+func (cs SFlowCounterSample) GetRecords() []SFlowRecord {
+	return cs.Records
+}
+
+// GetType will report the type of sample. Only the
+// compact form of counter samples is supported
+func (cs SFlowCounterSample) GetType() SFlowSampleType {
+	return SFlowTypeCounterSample
+}
+
+type SFlowCounterRecordType uint32
+
+const (
+	SFlowTypeGenericInterfaceCounters   SFlowCounterRecordType = 1
+	SFlowTypeEthernetInterfaceCounters  SFlowCounterRecordType = 2
+	SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3
+	SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4
+	SFlowTypeVLANCounters               SFlowCounterRecordType = 5
+	SFlowTypeLACPCounters               SFlowCounterRecordType = 7
+	SFlowTypeProcessorCounters          SFlowCounterRecordType = 1001
+	SFlowTypeOpenflowPortCounters       SFlowCounterRecordType = 1004
+	SFlowTypePORTNAMECounters           SFlowCounterRecordType = 1005
+	SFLowTypeAPPRESOURCESCounters       SFlowCounterRecordType = 2203
+	SFlowTypeOVSDPCounters              SFlowCounterRecordType = 2207
+)
+
+func (cr SFlowCounterRecordType) String() string {
+	switch cr {
+	case SFlowTypeGenericInterfaceCounters:
+		return "Generic Interface Counters"
+	case SFlowTypeEthernetInterfaceCounters:
+		return "Ethernet Interface Counters"
+	case SFlowTypeTokenRingInterfaceCounters:
+		return "Token Ring Interface Counters"
+	case SFlowType100BaseVGInterfaceCounters:
+		return "100BaseVG Interface Counters"
+	case SFlowTypeVLANCounters:
+		return "VLAN Counters"
+	case SFlowTypeLACPCounters:
+		return "LACP Counters"
+	case SFlowTypeProcessorCounters:
+		return "Processor Counters"
+	case SFlowTypeOpenflowPortCounters:
+		return "Openflow Port Counters"
+	case SFlowTypePORTNAMECounters:
+		return "PORT NAME Counters"
+	case SFLowTypeAPPRESOURCESCounters:
+		return "App Resources Counters"
+	case SFlowTypeOVSDPCounters:
+		return "OVSDP Counters"
+	default:
+		return ""
+
+	}
+}
+
+func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) {
+	s := SFlowCounterSample{}
+	var sdc SFlowDataSource
+	var sdce SFlowDataSourceExpanded
+	var sdf SFlowDataFormat
+
+	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	s.EnterpriseID, s.Format = sdf.decode()
+	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if expanded {
+		*data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))}
+		s.SourceIDClass, s.SourceIDIndex = sdce.decode()
+	} else {
+		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+	}
+	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	for i := uint32(0); i < s.RecordCount; i++ {
+		cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+		_, counterRecordType := cdf.decode()
+		switch counterRecordType {
+		case SFlowTypeGenericInterfaceCounters:
+			if record, err := decodeGenericInterfaceCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeEthernetInterfaceCounters:
+			if record, err := decodeEthernetCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeTokenRingInterfaceCounters:
+			skipRecord(data)
+			return s, errors.New("skipping TypeTokenRingInterfaceCounters")
+		case SFlowType100BaseVGInterfaceCounters:
+			skipRecord(data)
+			return s, errors.New("skipping Type100BaseVGInterfaceCounters")
+		case SFlowTypeVLANCounters:
+			if record, err := decodeVLANCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeLACPCounters:
+			if record, err := decodeLACPCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeProcessorCounters:
+			if record, err := decodeProcessorCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeOpenflowPortCounters:
+			if record, err := decodeOpenflowportCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypePORTNAMECounters:
+			if record, err := decodePortnameCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFLowTypeAPPRESOURCESCounters:
+			if record, err := decodeAppresourcesCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeOVSDPCounters:
+			if record, err := decodeOVSDPCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		default:
+			return s, fmt.Errorf("Invalid counter record type: %d", counterRecordType)
+		}
+	}
+	return s, nil
+}
+
+// SFlowBaseFlowRecord holds the fields common to all records
+// of type SFlowFlowRecordType
+type SFlowBaseFlowRecord struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowFlowRecordType
+	FlowDataLength uint32
+}
+
+func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType {
+	return bfr.Format
+}
+
+// SFlowFlowRecordType denotes what kind of Flow Record is
+// represented. See RFC 3176
+type SFlowFlowRecordType uint32
+
+const (
+	SFlowTypeRawPacketFlow                  SFlowFlowRecordType = 1
+	SFlowTypeEthernetFrameFlow              SFlowFlowRecordType = 2
+	SFlowTypeIpv4Flow                       SFlowFlowRecordType = 3
+	SFlowTypeIpv6Flow                       SFlowFlowRecordType = 4
+	SFlowTypeExtendedSwitchFlow             SFlowFlowRecordType = 1001
+	SFlowTypeExtendedRouterFlow             SFlowFlowRecordType = 1002
+	SFlowTypeExtendedGatewayFlow            SFlowFlowRecordType = 1003
+	SFlowTypeExtendedUserFlow               SFlowFlowRecordType = 1004
+	SFlowTypeExtendedUrlFlow                SFlowFlowRecordType = 1005
+	SFlowTypeExtendedMlpsFlow               SFlowFlowRecordType = 1006
+	SFlowTypeExtendedNatFlow                SFlowFlowRecordType = 1007
+	SFlowTypeExtendedMlpsTunnelFlow         SFlowFlowRecordType = 1008
+	SFlowTypeExtendedMlpsVcFlow             SFlowFlowRecordType = 1009
+	SFlowTypeExtendedMlpsFecFlow            SFlowFlowRecordType = 1010
+	SFlowTypeExtendedMlpsLvpFecFlow         SFlowFlowRecordType = 1011
+	SFlowTypeExtendedVlanFlow               SFlowFlowRecordType = 1012
+	SFlowTypeExtendedIpv4TunnelEgressFlow   SFlowFlowRecordType = 1023
+	SFlowTypeExtendedIpv4TunnelIngressFlow  SFlowFlowRecordType = 1024
+	SFlowTypeExtendedIpv6TunnelEgressFlow   SFlowFlowRecordType = 1025
+	SFlowTypeExtendedIpv6TunnelIngressFlow  SFlowFlowRecordType = 1026
+	SFlowTypeExtendedDecapsulateEgressFlow  SFlowFlowRecordType = 1027
+	SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028
+	SFlowTypeExtendedVniEgressFlow          SFlowFlowRecordType = 1029
+	SFlowTypeExtendedVniIngressFlow         SFlowFlowRecordType = 1030
+)
+
+func (rt SFlowFlowRecordType) String() string {
+	switch rt {
+	case SFlowTypeRawPacketFlow:
+		return "Raw Packet Flow Record"
+	case SFlowTypeEthernetFrameFlow:
+		return "Ethernet Frame Flow Record"
+	case SFlowTypeIpv4Flow:
+		return "IPv4 Flow Record"
+	case SFlowTypeIpv6Flow:
+		return "IPv6 Flow Record"
+	case SFlowTypeExtendedSwitchFlow:
+		return "Extended Switch Flow Record"
+	case SFlowTypeExtendedRouterFlow:
+		return "Extended Router Flow Record"
+	case SFlowTypeExtendedGatewayFlow:
+		return "Extended Gateway Flow Record"
+	case SFlowTypeExtendedUserFlow:
+		return "Extended User Flow Record"
+	case SFlowTypeExtendedUrlFlow:
+		return "Extended URL Flow Record"
+	case SFlowTypeExtendedMlpsFlow:
+		return "Extended MPLS Flow Record"
+	case SFlowTypeExtendedNatFlow:
+		return "Extended NAT Flow Record"
+	case SFlowTypeExtendedMlpsTunnelFlow:
+		return "Extended MPLS Tunnel Flow Record"
+	case SFlowTypeExtendedMlpsVcFlow:
+		return "Extended MPLS VC Flow Record"
+	case SFlowTypeExtendedMlpsFecFlow:
+		return "Extended MPLS FEC Flow Record"
+	case SFlowTypeExtendedMlpsLvpFecFlow:
+		return "Extended MPLS LVP FEC Flow Record"
+	case SFlowTypeExtendedVlanFlow:
+		return "Extended VLAN Flow Record"
+	case SFlowTypeExtendedIpv4TunnelEgressFlow:
+		return "Extended IPv4 Tunnel Egress Record"
+	case SFlowTypeExtendedIpv4TunnelIngressFlow:
+		return "Extended IPv4 Tunnel Ingress Record"
+	case SFlowTypeExtendedIpv6TunnelEgressFlow:
+		return "Extended IPv6 Tunnel Egress Record"
+	case SFlowTypeExtendedIpv6TunnelIngressFlow:
+		return "Extended IPv6 Tunnel Ingress Record"
+	case SFlowTypeExtendedDecapsulateEgressFlow:
+		return "Extended Decapsulate Egress Record"
+	case SFlowTypeExtendedDecapsulateIngressFlow:
+		return "Extended Decapsulate Ingress Record"
+	case SFlowTypeExtendedVniEgressFlow:
+		return "Extended VNI Egress Record"
+	case SFlowTypeExtendedVniIngressFlow:
+		return "Extended VNI Ingress Record"
+	default:
+		return ""
+	}
+}
+
+// SFlowRawPacketFlowRecord holds information about a sampled
+// packet grabbed as it transited the agent. This is
+// perhaps the most useful and interesting record type,
+// as it holds the headers of the sampled packet and
+// can be used to build up a complete picture of the
+// traffic patterns on a network.
+//
+// The raw packet header is sent back into gopacket for
+// decoding, and the resulting gopacket.Packet is stored
+// in the Header member.
+type SFlowRawPacketFlowRecord struct {
+	SFlowBaseFlowRecord
+	HeaderProtocol SFlowRawHeaderProtocol
+	FrameLength    uint32
+	PayloadRemoved uint32
+	HeaderLength   uint32
+	Header         gopacket.Packet
+}
+
+// Raw packet record types have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Header Protocol               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Frame Length                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Payload Removed               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Header Length                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  \                     Header                    \
+//  \                                               \
+//  \                                               \
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowRawHeaderProtocol uint32
+
+const (
+	SFlowProtoEthernet   SFlowRawHeaderProtocol = 1
+	SFlowProtoISO88024   SFlowRawHeaderProtocol = 2
+	SFlowProtoISO88025   SFlowRawHeaderProtocol = 3
+	SFlowProtoFDDI       SFlowRawHeaderProtocol = 4
+	SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5
+	SFlowProtoX25        SFlowRawHeaderProtocol = 6
+	SFlowProtoPPP        SFlowRawHeaderProtocol = 7
+	SFlowProtoSMDS       SFlowRawHeaderProtocol = 8
+	SFlowProtoAAL5       SFlowRawHeaderProtocol = 9
+	SFlowProtoAAL5_IP    SFlowRawHeaderProtocol = 10 /* e.g. Cisco AAL5 mux */
+	SFlowProtoIPv4       SFlowRawHeaderProtocol = 11
+	SFlowProtoIPv6       SFlowRawHeaderProtocol = 12
+	SFlowProtoMPLS       SFlowRawHeaderProtocol = 13
+	SFlowProtoPOS        SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */
+)
+
+func (sfhp SFlowRawHeaderProtocol) String() string {
+	switch sfhp {
+	case SFlowProtoEthernet:
+		return "ETHERNET-ISO88023"
+	case SFlowProtoISO88024:
+		return "ISO88024-TOKENBUS"
+	case SFlowProtoISO88025:
+		return "ISO88025-TOKENRING"
+	case SFlowProtoFDDI:
+		return "FDDI"
+	case SFlowProtoFrameRelay:
+		return "FRAME-RELAY"
+	case SFlowProtoX25:
+		return "X25"
+	case SFlowProtoPPP:
+		return "PPP"
+	case SFlowProtoSMDS:
+		return "SMDS"
+	case SFlowProtoAAL5:
+		return "AAL5"
+	case SFlowProtoAAL5_IP:
+		return "AAL5-IP"
+	case SFlowProtoIPv4:
+		return "IPv4"
+	case SFlowProtoIPv6:
+		return "IPv6"
+	case SFlowProtoMPLS:
+		return "MPLS"
+	case SFlowProtoPOS:
+		return "POS"
+	}
+	return "UNKNOWN"
+}
+
+func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) {
+	rec := SFlowRawPacketFlowRecord{}
+	header := []byte{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4]))
+	*data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
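+	// sFlow encodes records with XDR, so the sampled header bytes are
+	// padded out to a 4 byte boundary; the unsigned arithmetic below
+	// rounds HeaderLength up to a multiple of four.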
+	headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4))
+	*data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding]
+	rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default)
+	return rec, nil
+}
+
+// SFlowExtendedSwitchFlowRecord gives additional information
+// about the sampled packet if it's available. It's mainly
+// useful for getting at the incoming and outgoing VLANs.
+// An agent may or may not provide this information.
+type SFlowExtendedSwitchFlowRecord struct {
+	SFlowBaseFlowRecord
+	IncomingVLAN         uint32
+	IncomingVLANPriority uint32
+	OutgoingVLAN         uint32
+	OutgoingVLANPriority uint32
+}
+
+// Extended switch records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Incoming VLAN               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Incoming VLAN Priority         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Outgoing VLAN               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Outgoing VLAN Priority         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) {
+	es := SFlowExtendedSwitchFlowRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	es.EnterpriseID, es.Format = fdf.decode()
+	*data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return es, nil
+}
+
+// SFlowExtendedRouterFlowRecord gives additional information
+// about the layer 3 routing information used to forward
+// the packet.
+type SFlowExtendedRouterFlowRecord struct {
+	SFlowBaseFlowRecord
+	NextHop                net.IP
+	NextHopSourceMask      uint32
+	NextHopDestinationMask uint32
+}
+
+// Extended router records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   IP version of next hop router (1=v4|2=v6)   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /     Next Hop address (v4=4byte|v6=16byte)     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Next Hop Source Mask             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Next Hop Destination Mask        |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) {
+	er := SFlowExtendedRouterFlowRecord{}
+	var fdf SFlowFlowDataFormat
+	var extendedRouterAddressType SFlowIPType
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	er.EnterpriseID, er.Format = fdf.decode()
+	*data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()]
+	*data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return er, nil
+}
+
+// SFlowExtendedGatewayFlowRecord describes information treasured by
+// network engineers everywhere: AS path information listing which
+// BGP peer sent the packet, and various other BGP-related info.
+// This information is vital because it gives a picture of how much
+// traffic is being sent from / received by various BGP peers.
+
+// Extended gateway records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   IP version of next hop router (1=v4|2=v6)   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /     Next Hop address (v4=4byte|v6=16byte)     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       AS                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source AS                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Peer AS                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  AS Path Count                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                AS Path / Sequence             /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   Communities                 /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Local Pref                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// AS Path / Sequence:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     AS Source Type (Set=1 / Sequence=2)      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Path / Sequence length           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /              Path / Sequence Members          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Communities:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                community length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /              community Members                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedGatewayFlowRecord struct {
+	SFlowBaseFlowRecord
+	NextHop     net.IP
+	AS          uint32
+	SourceAS    uint32
+	PeerAS      uint32
+	ASPathCount uint32
+	ASPath      []SFlowASDestination
+	Communities []uint32
+	LocalPref   uint32
+}
+
+type SFlowASPathType uint32
+
+const (
+	SFlowASSet      SFlowASPathType = 1
+	SFlowASSequence SFlowASPathType = 2
+)
+
+func (apt SFlowASPathType) String() string {
+	switch apt {
+	case SFlowASSet:
+		return "AS Set"
+	case SFlowASSequence:
+		return "AS Sequence"
+	default:
+		return ""
+	}
+}
+
+type SFlowASDestination struct {
+	Type    SFlowASPathType
+	Count   uint32
+	Members []uint32
+}
+
+func (asd SFlowASDestination) String() string {
+	switch asd.Type {
+	case SFlowASSet:
+		return fmt.Sprint("AS Set:", asd.Members)
+	case SFlowASSequence:
+		return fmt.Sprint("AS Sequence:", asd.Members)
+	default:
+		return ""
+	}
+}
+
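+// decodePath consumes one AS path entry (type, member count and the
+// member AS numbers) from *data.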
+func (ad *SFlowASDestination) decodePath(data *[]byte) {
+	*data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	ad.Members = make([]uint32, ad.Count)
+	for i := uint32(0); i < ad.Count; i++ {
+		var member uint32
+		*data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		ad.Members[i] = member
+	}
+}
+
+func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) {
+	eg := SFlowExtendedGatewayFlowRecord{}
+	var fdf SFlowFlowDataFormat
+	var extendedGatewayAddressType SFlowIPType
+	var communitiesLength uint32
+	var community uint32
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eg.EnterpriseID, eg.Format = fdf.decode()
+	*data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()]
+	*data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	for i := uint32(0); i < eg.ASPathCount; i++ {
+		asPath := SFlowASDestination{}
+		asPath.decodePath(data)
+		eg.ASPath = append(eg.ASPath, asPath)
+	}
+	*data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	eg.Communities = make([]uint32, communitiesLength)
+	for j := uint32(0); j < communitiesLength; j++ {
+		*data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		eg.Communities[j] = community
+	}
+	*data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return eg, nil
+}
+
+// **************************************************
+//  Extended URL Flow Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   direction                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      URL                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      Host                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowURLDirection uint32
+
+const (
+	SFlowURLsrc SFlowURLDirection = 1
+	SFlowURLdst SFlowURLDirection = 2
+)
+
+func (urld SFlowURLDirection) String() string {
+	switch urld {
+	case SFlowURLsrc:
+		return "Source address is the server"
+	case SFlowURLdst:
+		return "Destination address is the server"
+	default:
+		return ""
+	}
+}
+
+type SFlowExtendedURLRecord struct {
+	SFlowBaseFlowRecord
+	Direction SFlowURLDirection
+	URL       string
+	Host      string
+}
+
+func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) {
+	eur := SFlowExtendedURLRecord{}
+	var fdf SFlowFlowDataFormat
+	var urlLen uint32
+	var urlLenWithPad int
+	var hostLen uint32
+	var hostLenWithPad int
+	var urlBytes []byte
+	var hostBytes []byte
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eur.EnterpriseID, eur.Format = fdf.decode()
+	*data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4]))
+	*data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	urlLenWithPad = int(urlLen + ((4 - urlLen) % 4))
+	*data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad]
+	eur.URL = string(urlBytes[:urlLen])
+	*data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	hostLenWithPad = int(hostLen + ((4 - hostLen) % 4))
+	*data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad]
+	eur.Host = string(hostBytes[:hostLen])
+	return eur, nil
+}
+
+// **************************************************
+//  Extended User Flow Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Source Character Set           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Source User Id                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Destination Character Set        |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Destination User ID             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedUserFlow struct {
+	SFlowBaseFlowRecord
+	SourceCharSet      SFlowCharSet
+	SourceUserID       string
+	DestinationCharSet SFlowCharSet
+	DestinationUserID  string
+}
+
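+// SFlowCharSet identifies the character set of a user ID string. The
+// values mirror the MIBenum numbers from the IANA character set
+// registry, which the sFlow specification references.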
+type SFlowCharSet uint32
+
+const (
+	SFlowCSunknown                 SFlowCharSet = 2
+	SFlowCSASCII                   SFlowCharSet = 3
+	SFlowCSISOLatin1               SFlowCharSet = 4
+	SFlowCSISOLatin2               SFlowCharSet = 5
+	SFlowCSISOLatin3               SFlowCharSet = 6
+	SFlowCSISOLatin4               SFlowCharSet = 7
+	SFlowCSISOLatinCyrillic        SFlowCharSet = 8
+	SFlowCSISOLatinArabic          SFlowCharSet = 9
+	SFlowCSISOLatinGreek           SFlowCharSet = 10
+	SFlowCSISOLatinHebrew          SFlowCharSet = 11
+	SFlowCSISOLatin5               SFlowCharSet = 12
+	SFlowCSISOLatin6               SFlowCharSet = 13
+	SFlowCSISOTextComm             SFlowCharSet = 14
+	SFlowCSHalfWidthKatakana       SFlowCharSet = 15
+	SFlowCSJISEncoding             SFlowCharSet = 16
+	SFlowCSShiftJIS                SFlowCharSet = 17
+	SFlowCSEUCPkdFmtJapanese       SFlowCharSet = 18
+	SFlowCSEUCFixWidJapanese       SFlowCharSet = 19
+	SFlowCSISO4UnitedKingdom       SFlowCharSet = 20
+	SFlowCSISO11SwedishForNames    SFlowCharSet = 21
+	SFlowCSISO15Italian            SFlowCharSet = 22
+	SFlowCSISO17Spanish            SFlowCharSet = 23
+	SFlowCSISO21German             SFlowCharSet = 24
+	SFlowCSISO60DanishNorwegian    SFlowCharSet = 25
+	SFlowCSISO69French             SFlowCharSet = 26
+	SFlowCSISO10646UTF1            SFlowCharSet = 27
+	SFlowCSISO646basic1983         SFlowCharSet = 28
+	SFlowCSINVARIANT               SFlowCharSet = 29
+	SFlowCSISO2IntlRefVersion      SFlowCharSet = 30
+	SFlowCSNATSSEFI                SFlowCharSet = 31
+	SFlowCSNATSSEFIADD             SFlowCharSet = 32
+	SFlowCSNATSDANO                SFlowCharSet = 33
+	SFlowCSNATSDANOADD             SFlowCharSet = 34
+	SFlowCSISO10Swedish            SFlowCharSet = 35
+	SFlowCSKSC56011987             SFlowCharSet = 36
+	SFlowCSISO2022KR               SFlowCharSet = 37
+	SFlowCSEUCKR                   SFlowCharSet = 38
+	SFlowCSISO2022JP               SFlowCharSet = 39
+	SFlowCSISO2022JP2              SFlowCharSet = 40
+	SFlowCSISO13JISC6220jp         SFlowCharSet = 41
+	SFlowCSISO14JISC6220ro         SFlowCharSet = 42
+	SFlowCSISO16Portuguese         SFlowCharSet = 43
+	SFlowCSISO18Greek7Old          SFlowCharSet = 44
+	SFlowCSISO19LatinGreek         SFlowCharSet = 45
+	SFlowCSISO25French             SFlowCharSet = 46
+	SFlowCSISO27LatinGreek1        SFlowCharSet = 47
+	SFlowCSISO5427Cyrillic         SFlowCharSet = 48
+	SFlowCSISO42JISC62261978       SFlowCharSet = 49
+	SFlowCSISO47BSViewdata         SFlowCharSet = 50
+	SFlowCSISO49INIS               SFlowCharSet = 51
+	SFlowCSISO50INIS8              SFlowCharSet = 52
+	SFlowCSISO51INISCyrillic       SFlowCharSet = 53
+	SFlowCSISO54271981             SFlowCharSet = 54
+	SFlowCSISO5428Greek            SFlowCharSet = 55
+	SFlowCSISO57GB1988             SFlowCharSet = 56
+	SFlowCSISO58GB231280           SFlowCharSet = 57
+	SFlowCSISO61Norwegian2         SFlowCharSet = 58
+	SFlowCSISO70VideotexSupp1      SFlowCharSet = 59
+	SFlowCSISO84Portuguese2        SFlowCharSet = 60
+	SFlowCSISO85Spanish2           SFlowCharSet = 61
+	SFlowCSISO86Hungarian          SFlowCharSet = 62
+	SFlowCSISO87JISX0208           SFlowCharSet = 63
+	SFlowCSISO88Greek7             SFlowCharSet = 64
+	SFlowCSISO89ASMO449            SFlowCharSet = 65
+	SFlowCSISO90                   SFlowCharSet = 66
+	SFlowCSISO91JISC62291984a      SFlowCharSet = 67
+	SFlowCSISO92JISC62991984b      SFlowCharSet = 68
+	SFlowCSISO93JIS62291984badd    SFlowCharSet = 69
+	SFlowCSISO94JIS62291984hand    SFlowCharSet = 70
+	SFlowCSISO95JIS62291984handadd SFlowCharSet = 71
+	SFlowCSISO96JISC62291984kana   SFlowCharSet = 72
+	SFlowCSISO2033                 SFlowCharSet = 73
+	SFlowCSISO99NAPLPS             SFlowCharSet = 74
+	SFlowCSISO102T617bit           SFlowCharSet = 75
+	SFlowCSISO103T618bit           SFlowCharSet = 76
+	SFlowCSISO111ECMACyrillic      SFlowCharSet = 77
+	SFlowCSa71                     SFlowCharSet = 78
+	SFlowCSa72                     SFlowCharSet = 79
+	SFlowCSISO123CSAZ24341985gr    SFlowCharSet = 80
+	SFlowCSISO88596E               SFlowCharSet = 81
+	SFlowCSISO88596I               SFlowCharSet = 82
+	SFlowCSISO128T101G2            SFlowCharSet = 83
+	SFlowCSISO88598E               SFlowCharSet = 84
+	SFlowCSISO88598I               SFlowCharSet = 85
+	SFlowCSISO139CSN369103         SFlowCharSet = 86
+	SFlowCSISO141JUSIB1002         SFlowCharSet = 87
+	SFlowCSISO143IECP271           SFlowCharSet = 88
+	SFlowCSISO146Serbian           SFlowCharSet = 89
+	SFlowCSISO147Macedonian        SFlowCharSet = 90
+	SFlowCSISO150                  SFlowCharSet = 91
+	SFlowCSISO151Cuba              SFlowCharSet = 92
+	SFlowCSISO6937Add              SFlowCharSet = 93
+	SFlowCSISO153GOST1976874       SFlowCharSet = 94
+	SFlowCSISO8859Supp             SFlowCharSet = 95
+	SFlowCSISO10367Box             SFlowCharSet = 96
+	SFlowCSISO158Lap               SFlowCharSet = 97
+	SFlowCSISO159JISX02121990      SFlowCharSet = 98
+	SFlowCSISO646Danish            SFlowCharSet = 99
+	SFlowCSUSDK                    SFlowCharSet = 100
+	SFlowCSDKUS                    SFlowCharSet = 101
+	SFlowCSKSC5636                 SFlowCharSet = 102
+	SFlowCSUnicode11UTF7           SFlowCharSet = 103
+	SFlowCSISO2022CN               SFlowCharSet = 104
+	SFlowCSISO2022CNEXT            SFlowCharSet = 105
+	SFlowCSUTF8                    SFlowCharSet = 106
+	SFlowCSISO885913               SFlowCharSet = 109
+	SFlowCSISO885914               SFlowCharSet = 110
+	SFlowCSISO885915               SFlowCharSet = 111
+	SFlowCSISO885916               SFlowCharSet = 112
+	SFlowCSGBK                     SFlowCharSet = 113
+	SFlowCSGB18030                 SFlowCharSet = 114
+	SFlowCSOSDEBCDICDF0415         SFlowCharSet = 115
+	SFlowCSOSDEBCDICDF03IRV        SFlowCharSet = 116
+	SFlowCSOSDEBCDICDF041          SFlowCharSet = 117
+	SFlowCSISO115481               SFlowCharSet = 118
+	SFlowCSKZ1048                  SFlowCharSet = 119
+	SFlowCSUnicode                 SFlowCharSet = 1000
+	SFlowCSUCS4                    SFlowCharSet = 1001
+	SFlowCSUnicodeASCII            SFlowCharSet = 1002
+	SFlowCSUnicodeLatin1           SFlowCharSet = 1003
+	SFlowCSUnicodeJapanese         SFlowCharSet = 1004
+	SFlowCSUnicodeIBM1261          SFlowCharSet = 1005
+	SFlowCSUnicodeIBM1268          SFlowCharSet = 1006
+	SFlowCSUnicodeIBM1276          SFlowCharSet = 1007
+	SFlowCSUnicodeIBM1264          SFlowCharSet = 1008
+	SFlowCSUnicodeIBM1265          SFlowCharSet = 1009
+	SFlowCSUnicode11               SFlowCharSet = 1010
+	SFlowCSSCSU                    SFlowCharSet = 1011
+	SFlowCSUTF7                    SFlowCharSet = 1012
+	SFlowCSUTF16BE                 SFlowCharSet = 1013
+	SFlowCSUTF16LE                 SFlowCharSet = 1014
+	SFlowCSUTF16                   SFlowCharSet = 1015
+	SFlowCSCESU8                   SFlowCharSet = 1016
+	SFlowCSUTF32                   SFlowCharSet = 1017
+	SFlowCSUTF32BE                 SFlowCharSet = 1018
+	SFlowCSUTF32LE                 SFlowCharSet = 1019
+	SFlowCSBOCU1                   SFlowCharSet = 1020
+	SFlowCSWindows30Latin1         SFlowCharSet = 2000
+	SFlowCSWindows31Latin1         SFlowCharSet = 2001
+	SFlowCSWindows31Latin2         SFlowCharSet = 2002
+	SFlowCSWindows31Latin5         SFlowCharSet = 2003
+	SFlowCSHPRoman8                SFlowCharSet = 2004
+	SFlowCSAdobeStandardEncoding   SFlowCharSet = 2005
+	SFlowCSVenturaUS               SFlowCharSet = 2006
+	SFlowCSVenturaInternational    SFlowCharSet = 2007
+	SFlowCSDECMCS                  SFlowCharSet = 2008
+	SFlowCSPC850Multilingual       SFlowCharSet = 2009
+	SFlowCSPCp852                  SFlowCharSet = 2010
+	SFlowCSPC8CodePage437          SFlowCharSet = 2011
+	SFlowCSPC8DanishNorwegian      SFlowCharSet = 2012
+	SFlowCSPC862LatinHebrew        SFlowCharSet = 2013
+	SFlowCSPC8Turkish              SFlowCharSet = 2014
+	SFlowCSIBMSymbols              SFlowCharSet = 2015
+	SFlowCSIBMThai                 SFlowCharSet = 2016
+	SFlowCSHPLegal                 SFlowCharSet = 2017
+	SFlowCSHPPiFont                SFlowCharSet = 2018
+	SFlowCSHPMath8                 SFlowCharSet = 2019
+	SFlowCSHPPSMath                SFlowCharSet = 2020
+	SFlowCSHPDesktop               SFlowCharSet = 2021
+	SFlowCSVenturaMath             SFlowCharSet = 2022
+	SFlowCSMicrosoftPublishing     SFlowCharSet = 2023
+	SFlowCSWindows31J              SFlowCharSet = 2024
+	SFlowCSGB2312                  SFlowCharSet = 2025
+	SFlowCSBig5                    SFlowCharSet = 2026
+	SFlowCSMacintosh               SFlowCharSet = 2027
+	SFlowCSIBM037                  SFlowCharSet = 2028
+	SFlowCSIBM038                  SFlowCharSet = 2029
+	SFlowCSIBM273                  SFlowCharSet = 2030
+	SFlowCSIBM274                  SFlowCharSet = 2031
+	SFlowCSIBM275                  SFlowCharSet = 2032
+	SFlowCSIBM277                  SFlowCharSet = 2033
+	SFlowCSIBM278                  SFlowCharSet = 2034
+	SFlowCSIBM280                  SFlowCharSet = 2035
+	SFlowCSIBM281                  SFlowCharSet = 2036
+	SFlowCSIBM284                  SFlowCharSet = 2037
+	SFlowCSIBM285                  SFlowCharSet = 2038
+	SFlowCSIBM290                  SFlowCharSet = 2039
+	SFlowCSIBM297                  SFlowCharSet = 2040
+	SFlowCSIBM420                  SFlowCharSet = 2041
+	SFlowCSIBM423                  SFlowCharSet = 2042
+	SFlowCSIBM424                  SFlowCharSet = 2043
+	SFlowCSIBM500                  SFlowCharSet = 2044
+	SFlowCSIBM851                  SFlowCharSet = 2045
+	SFlowCSIBM855                  SFlowCharSet = 2046
+	SFlowCSIBM857                  SFlowCharSet = 2047
+	SFlowCSIBM860                  SFlowCharSet = 2048
+	SFlowCSIBM861                  SFlowCharSet = 2049
+	SFlowCSIBM863                  SFlowCharSet = 2050
+	SFlowCSIBM864                  SFlowCharSet = 2051
+	SFlowCSIBM865                  SFlowCharSet = 2052
+	SFlowCSIBM868                  SFlowCharSet = 2053
+	SFlowCSIBM869                  SFlowCharSet = 2054
+	SFlowCSIBM870                  SFlowCharSet = 2055
+	SFlowCSIBM871                  SFlowCharSet = 2056
+	SFlowCSIBM880                  SFlowCharSet = 2057
+	SFlowCSIBM891                  SFlowCharSet = 2058
+	SFlowCSIBM903                  SFlowCharSet = 2059
+	SFlowCSIBBM904                 SFlowCharSet = 2060
+	SFlowCSIBM905                  SFlowCharSet = 2061
+	SFlowCSIBM918                  SFlowCharSet = 2062
+	SFlowCSIBM1026                 SFlowCharSet = 2063
+	SFlowCSIBMEBCDICATDE           SFlowCharSet = 2064
+	SFlowCSEBCDICATDEA             SFlowCharSet = 2065
+	SFlowCSEBCDICCAFR              SFlowCharSet = 2066
+	SFlowCSEBCDICDKNO              SFlowCharSet = 2067
+	SFlowCSEBCDICDKNOA             SFlowCharSet = 2068
+	SFlowCSEBCDICFISE              SFlowCharSet = 2069
+	SFlowCSEBCDICFISEA             SFlowCharSet = 2070
+	SFlowCSEBCDICFR                SFlowCharSet = 2071
+	SFlowCSEBCDICIT                SFlowCharSet = 2072
+	SFlowCSEBCDICPT                SFlowCharSet = 2073
+	SFlowCSEBCDICES                SFlowCharSet = 2074
+	SFlowCSEBCDICESA               SFlowCharSet = 2075
+	SFlowCSEBCDICESS               SFlowCharSet = 2076
+	SFlowCSEBCDICUK                SFlowCharSet = 2077
+	SFlowCSEBCDICUS                SFlowCharSet = 2078
+	SFlowCSUnknown8BiT             SFlowCharSet = 2079
+	SFlowCSMnemonic                SFlowCharSet = 2080
+	SFlowCSMnem                    SFlowCharSet = 2081
+	SFlowCSVISCII                  SFlowCharSet = 2082
+	SFlowCSVIQR                    SFlowCharSet = 2083
+	SFlowCSKOI8R                   SFlowCharSet = 2084
+	SFlowCSHZGB2312                SFlowCharSet = 2085
+	SFlowCSIBM866                  SFlowCharSet = 2086
+	SFlowCSPC775Baltic             SFlowCharSet = 2087
+	SFlowCSKOI8U                   SFlowCharSet = 2088
+	SFlowCSIBM00858                SFlowCharSet = 2089
+	SFlowCSIBM00924                SFlowCharSet = 2090
+	SFlowCSIBM01140                SFlowCharSet = 2091
+	SFlowCSIBM01141                SFlowCharSet = 2092
+	SFlowCSIBM01142                SFlowCharSet = 2093
+	SFlowCSIBM01143                SFlowCharSet = 2094
+	SFlowCSIBM01144                SFlowCharSet = 2095
+	SFlowCSIBM01145                SFlowCharSet = 2096
+	SFlowCSIBM01146                SFlowCharSet = 2097
+	SFlowCSIBM01147                SFlowCharSet = 2098
+	SFlowCSIBM01148                SFlowCharSet = 2099
+	SFlowCSIBM01149                SFlowCharSet = 2100
+	SFlowCSBig5HKSCS               SFlowCharSet = 2101
+	SFlowCSIBM1047                 SFlowCharSet = 2102
+	SFlowCSPTCP154                 SFlowCharSet = 2103
+	SFlowCSAmiga1251               SFlowCharSet = 2104
+	SFlowCSKOI7switched            SFlowCharSet = 2105
+	SFlowCSBRF                     SFlowCharSet = 2106
+	SFlowCSTSCII                   SFlowCharSet = 2107
+	SFlowCSCP51932                 SFlowCharSet = 2108
+	SFlowCSWindows874              SFlowCharSet = 2109
+	SFlowCSWindows1250             SFlowCharSet = 2250
+	SFlowCSWindows1251             SFlowCharSet = 2251
+	SFlowCSWindows1252             SFlowCharSet = 2252
+	SFlowCSWindows1253             SFlowCharSet = 2253
+	SFlowCSWindows1254             SFlowCharSet = 2254
+	SFlowCSWindows1255             SFlowCharSet = 2255
+	SFlowCSWindows1256             SFlowCharSet = 2256
+	SFlowCSWindows1257             SFlowCharSet = 2257
+	SFlowCSWindows1258             SFlowCharSet = 2258
+	SFlowCSTIS620                  SFlowCharSet = 2259
+	SFlowCS50220                   SFlowCharSet = 2260
+	SFlowCSreserved                SFlowCharSet = 3000
+)
+
+func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) {
+	eu := SFlowExtendedUserFlow{}
+	var fdf SFlowFlowDataFormat
+	var srcUserLen uint32
+	var srcUserLenWithPad int
+	var srcUserBytes []byte
+	var dstUserLen uint32
+	var dstUserLenWithPad int
+	var dstUserBytes []byte
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eu.EnterpriseID, eu.Format = fdf.decode()
+	*data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+	*data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4))
+	*data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad]
+	eu.SourceUserID = string(srcUserBytes[:srcUserLen])
+	*data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+	*data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4))
+	*data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad]
+	eu.DestinationUserID = string(dstUserBytes[:dstUserLen])
+	return eu, nil
+}
+
+// **************************************************
+//  Packet IP version 4 Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     Length                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Protocol                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source IPv4                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination IPv4               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source Port                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination Port               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TCP Flags                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TOS                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv4Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+	Length uint32
+	// IP Protocol type (for example, TCP = 6, UDP = 17)
+	Protocol uint32
+	// Source IP Address
+	IPSrc net.IP
+	// Destination IP Address
+	IPDst net.IP
+	// TCP/UDP source port number or equivalent
+	PortSrc uint32
+	// TCP/UDP destination port number or equivalent
+	PortDst uint32
+	// TCP flags
+	TCPFlags uint32
+	// IP type of service
+	TOS uint32
+}
+
+func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) {
+	si := SFlowIpv4Record{}
+
+	*data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.IPSrc = (*data)[4:], net.IP((*data)[:4])
+	*data, si.IPDst = (*data)[4:], net.IP((*data)[:4])
+	*data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return si, nil
+}
+
+// **************************************************
+//  Packet IP version 6 Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     Length                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Protocol                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source IPv6                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination IPv6               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source Port                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination Port               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TCP Flags                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Priority                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv6Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+	Length uint32
+	// IP Protocol type (for example, TCP = 6, UDP = 17)
+	Protocol uint32
+	// Source IP Address
+	IPSrc net.IP
+	// Destination IP Address
+	IPDst net.IP
+	// TCP/UDP source port number or equivalent
+	PortSrc uint32
+	// TCP/UDP destination port number or equivalent
+	PortDst uint32
+	// TCP flags
+	TCPFlags uint32
+	// IP priority
+	Priority uint32
+}
+
+func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) {
+	si := SFlowIpv6Record{}
+
+	*data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.IPSrc = (*data)[16:], net.IP((*data)[:16])
+	*data, si.IPDst = (*data)[16:], net.IP((*data)[:16])
+	*data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return si, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelEgressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelIngressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv6 Tunnel Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 6 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelEgressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) {
+	rec := SFlowExtendedIpv6TunnelEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv6 Tunnel Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 6 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelIngressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) {
+	rec := SFlowExtendedIpv6TunnelIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended Decapsulate Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Inner Header Offset             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateEgressRecord struct {
+	SFlowBaseFlowRecord
+	InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) {
+	rec := SFlowExtendedDecapsulateEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended Decapsulate Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Inner Header Offset             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateIngressRecord struct {
+	SFlowBaseFlowRecord
+	InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) {
+	rec := SFlowExtendedDecapsulateIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended VNI Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       VNI                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniEgressRecord struct {
+	SFlowBaseFlowRecord
+	VNI uint32
+}
+
+func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) {
+	rec := SFlowExtendedVniEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended VNI Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       VNI                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniIngressRecord struct {
+	SFlowBaseFlowRecord
+	VNI uint32
+}
+
+func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) {
+	rec := SFlowExtendedVniIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Counter Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   counter data                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowBaseCounterRecord struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowCounterRecordType
+	FlowDataLength uint32
+}
+
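+// GetType returns the format of the counter record. It panics if the
+// format is not one of the known SFlowCounterRecordType values.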
+func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType {
+	switch bcr.Format {
+	case SFlowTypeGenericInterfaceCounters:
+		return SFlowTypeGenericInterfaceCounters
+	case SFlowTypeEthernetInterfaceCounters:
+		return SFlowTypeEthernetInterfaceCounters
+	case SFlowTypeTokenRingInterfaceCounters:
+		return SFlowTypeTokenRingInterfaceCounters
+	case SFlowType100BaseVGInterfaceCounters:
+		return SFlowType100BaseVGInterfaceCounters
+	case SFlowTypeVLANCounters:
+		return SFlowTypeVLANCounters
+	case SFlowTypeLACPCounters:
+		return SFlowTypeLACPCounters
+	case SFlowTypeProcessorCounters:
+		return SFlowTypeProcessorCounters
+	case SFlowTypeOpenflowPortCounters:
+		return SFlowTypeOpenflowPortCounters
+	case SFlowTypePORTNAMECounters:
+		return SFlowTypePORTNAMECounters
+	case SFLowTypeAPPRESOURCESCounters:
+		return SFLowTypeAPPRESOURCESCounters
+	case SFlowTypeOVSDPCounters:
+		return SFlowTypeOVSDPCounters
+	}
+	unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format)
+	panic(unrecognized)
+}
+
+// **************************************************
+//  Generic Interface Counters Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfIndex                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfType                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfSpeed                     |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfDirection                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfStatus                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfInOctets                  |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfInUcastPkts               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInMulticastPkts            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInBroadcastPkts            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfInDiscards               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfInErrors                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInUnknownProtos            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutOctets                 |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutUcastPkts              |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfOutMulticastPkts           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfOutBroadcastPkts           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutDiscards               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfOutErrors                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 IfPromiscuousMode             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowGenericInterfaceCounters struct {
+	SFlowBaseCounterRecord
+	IfIndex            uint32
+	IfType             uint32
+	IfSpeed            uint64
+	IfDirection        uint32
+	IfStatus           uint32
+	IfInOctets         uint64
+	IfInUcastPkts      uint32
+	IfInMulticastPkts  uint32
+	IfInBroadcastPkts  uint32
+	IfInDiscards       uint32
+	IfInErrors         uint32
+	IfInUnknownProtos  uint32
+	IfOutOctets        uint64
+	IfOutUcastPkts     uint32
+	IfOutMulticastPkts uint32
+	IfOutBroadcastPkts uint32
+	IfOutDiscards      uint32
+	IfOutErrors        uint32
+	IfPromiscuousMode  uint32
+}
+
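+// Note on the decoding idiom used by the decode* helpers below: every
+// statement of the form
+//
+//	*data, v = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+//
+// reads one fixed-width big-endian field from the front of *data and advances
+// the slice past it in a single assignment (the right-hand side is evaluated
+// before either variable is assigned).
+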
+func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) {
+	gic := SFlowGenericInterfaceCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	gic.EnterpriseID, gic.Format = cdf.decode()
+	*data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return gic, nil
+}
+
+// **************************************************
+//  Counter Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   counter data                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowEthernetCounters struct {
+	SFlowBaseCounterRecord
+	AlignmentErrors           uint32
+	FCSErrors                 uint32
+	SingleCollisionFrames     uint32
+	MultipleCollisionFrames   uint32
+	SQETestErrors             uint32
+	DeferredTransmissions     uint32
+	LateCollisions            uint32
+	ExcessiveCollisions       uint32
+	InternalMacTransmitErrors uint32
+	CarrierSenseErrors        uint32
+	FrameTooLongs             uint32
+	InternalMacReceiveErrors  uint32
+	SymbolErrors              uint32
+}
+
+func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) {
+	ec := SFlowEthernetCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	ec.EnterpriseID, ec.Format = cdf.decode()
+	*data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.SingleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.ExcessiveCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return ec, nil
+}
+
+// VLAN Counter
+
+type SFlowVLANCounters struct {
+	SFlowBaseCounterRecord
+	VlanID        uint32
+	Octets        uint64
+	UcastPkts     uint32
+	MulticastPkts uint32
+	BroadcastPkts uint32
+	Discards      uint32
+}
+
+func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) {
+	vc := SFlowVLANCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	vc.EnterpriseID, vc.Format = cdf.decode()
+	*data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return vc, nil
+}
+
+// SFLLACPPortState : SFlow LACP Port State (All(4) - 32 bit)
+type SFLLACPPortState struct {
+	PortStateAll uint32
+}
+
+// SFlowLACPCounters : LACP SFlow Counters (64 Bytes)
+type SFlowLACPCounters struct {
+	SFlowBaseCounterRecord
+	ActorSystemID        net.HardwareAddr
+	PartnerSystemID      net.HardwareAddr
+	AttachedAggID        uint32
+	LacpPortState        SFLLACPPortState
+	LACPDUsRx            uint32
+	MarkerPDUsRx         uint32
+	MarkerResponsePDUsRx uint32
+	UnknownRx            uint32
+	IllegalRx            uint32
+	LACPDUsTx            uint32
+	MarkerPDUsTx         uint32
+	MarkerResponsePDUsTx uint32
+}
+
+func decodeLACPCounters(data *[]byte) (SFlowLACPCounters, error) {
+	la := SFlowLACPCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	la.EnterpriseID, la.Format = cdf.decode()
+	*data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.ActorSystemID = (*data)[6:], (*data)[:6]
+	*data = (*data)[2:] // remove padding
+	*data, la.PartnerSystemID = (*data)[6:], (*data)[:6]
+	*data = (*data)[2:] // remove padding
+	*data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return la, nil
+}
+
+// **************************************************
+//  Processor Counter Record
+// **************************************************
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FiveSecCpu                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    OneMinCpu                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FiveMinCpu                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TotalMemory                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FreeMemory                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowProcessorCounters struct {
+	SFlowBaseCounterRecord
+	FiveSecCpu  uint32 // 5 second average CPU utilization
+	OneMinCpu   uint32 // 1 minute average CPU utilization
+	FiveMinCpu  uint32 // 5 minute average CPU utilization
+	TotalMemory uint64 // total memory (in bytes)
+	FreeMemory  uint64 // free memory (in bytes)
+}
+
+func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) {
+	pc := SFlowProcessorCounters{}
+	var cdf SFlowCounterDataFormat
+	var high32, low32 uint32
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	pc.EnterpriseID, pc.Format = cdf.decode()
+	*data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.TotalMemory = (uint64(high32) << 32) + uint64(low32)
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.FreeMemory = (uint64(high32) << 32) + uint64(low32)
+
+	return pc, nil
+}
+
+// SFlowEthernetFrameFlowRecord gives additional information
+// about the sampled packet if it's available.
+// An agent may or may not provide this information.
+type SFlowEthernetFrameFlowRecord struct {
+	SFlowBaseFlowRecord
+	FrameLength uint32
+	SrcMac      net.HardwareAddr
+	DstMac      net.HardwareAddr
+	Type        uint32
+}
+
+// Ethernet frame flow records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Source Mac Address             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |             Destination Mac Address           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Ethernet Packet Type            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) {
+	es := SFlowEthernetFrameFlowRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	es.EnterpriseID, es.Format = fdf.decode()
+	*data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return es, nil
+}
+
+//SFlowOpenflowPortCounters  :  OVS-Sflow OpenFlow Port Counter  ( 20 Bytes )
+type SFlowOpenflowPortCounters struct {
+	SFlowBaseCounterRecord
+	DatapathID uint64
+	PortNo     uint32
+}
+
+func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) {
+	ofp := SFlowOpenflowPortCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	ofp.EnterpriseID, ofp.Format = cdf.decode()
+	*data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return ofp, nil
+}
+
+// SFlowAppresourcesCounters : OVS sFlow App Resources Counter (48 Bytes)
+type SFlowAppresourcesCounters struct {
+	SFlowBaseCounterRecord
+	UserTime   uint32
+	SystemTime uint32
+	MemUsed    uint64
+	MemMax     uint64
+	FdOpen     uint32
+	FdMax      uint32
+	ConnOpen   uint32
+	ConnMax    uint32
+}
+
+func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) {
+	app := SFlowAppresourcesCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	app.EnterpriseID, app.Format = cdf.decode()
+	*data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return app, nil
+}
+
+//SFlowOVSDPCounters  :  OVS-Sflow DataPath Counter  ( 32 Bytes )
+type SFlowOVSDPCounters struct {
+	SFlowBaseCounterRecord
+	NHit     uint32
+	NMissed  uint32
+	NLost    uint32
+	NMaskHit uint32
+	NFlows   uint32
+	NMasks   uint32
+}
+
+func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) {
+	dp := SFlowOVSDPCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	dp.EnterpriseID, dp.Format = cdf.decode()
+	*data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return dp, nil
+}
+
+//SFlowPORTNAME  :  OVS-Sflow PORTNAME Counter Sampletype ( 20 Bytes )
+type SFlowPORTNAME struct {
+	SFlowBaseCounterRecord
+	Len uint32
+	Str string
+}
+
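+// decodeString reads an XDR-style string: a 4-byte big-endian length, the
+// string bytes themselves, then padding up to the next 4-byte boundary. The
+// padded length is consumed from *data, while str holds only the unpadded
+// bytes.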
+func decodeString(data *[]byte) (len uint32, str string) {
+	*data, len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	str = string((*data)[:len])
+	if (len % 4) != 0 {
+		len += 4 - len%4
+	}
+	*data = (*data)[len:]
+	return
+}
+
+func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) {
+	pn := SFlowPORTNAME{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	pn.EnterpriseID, pn.Format = cdf.decode()
+	*data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pn.Len, pn.Str = decodeString(data)
+
+	return pn, nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go
new file mode 100644
index 0000000..5403688
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sip.go
@@ -0,0 +1,546 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// SIPVersion defines the different versions of the SIP Protocol
+type SIPVersion uint8
+
+// Represents all the versions of SIP protocol
+const (
+	SIPVersion1 SIPVersion = 1
+	SIPVersion2 SIPVersion = 2
+)
+
+func (sv SIPVersion) String() string {
+	switch sv {
+	default:
+		// Defaulting to SIP/2.0
+		return "SIP/2.0"
+	case SIPVersion1:
+		return "SIP/1.0"
+	case SIPVersion2:
+		return "SIP/2.0"
+	}
+}
+
+// GetSIPVersion is used to get SIP version constant
+func GetSIPVersion(version string) (SIPVersion, error) {
+	switch strings.ToUpper(version) {
+	case "SIP/1.0":
+		return SIPVersion1, nil
+	case "SIP/2.0":
+		return SIPVersion2, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP version: '%s'", version)
+
+	}
+}
+
+// SIPMethod defines the different methods of the SIP Protocol
+// defined in the different RFC's
+type SIPMethod uint16
+
+// Here are all the SIP methods
+const (
+	SIPMethodInvite    SIPMethod = 1  // INVITE	[RFC3261]
+	SIPMethodAck       SIPMethod = 2  // ACK	[RFC3261]
+	SIPMethodBye       SIPMethod = 3  // BYE	[RFC3261]
+	SIPMethodCancel    SIPMethod = 4  // CANCEL	[RFC3261]
+	SIPMethodOptions   SIPMethod = 5  // OPTIONS	[RFC3261]
+	SIPMethodRegister  SIPMethod = 6  // REGISTER	[RFC3261]
+	SIPMethodPrack     SIPMethod = 7  // PRACK	[RFC3262]
+	SIPMethodSubscribe SIPMethod = 8  // SUBSCRIBE	[RFC6665]
+	SIPMethodNotify    SIPMethod = 9  // NOTIFY	[RFC6665]
+	SIPMethodPublish   SIPMethod = 10 // PUBLISH	[RFC3903]
+	SIPMethodInfo      SIPMethod = 11 // INFO	[RFC6086]
+	SIPMethodRefer     SIPMethod = 12 // REFER	[RFC3515]
+	SIPMethodMessage   SIPMethod = 13 // MESSAGE	[RFC3428]
+	SIPMethodUpdate    SIPMethod = 14 // UPDATE	[RFC3311]
+	SIPMethodPing      SIPMethod = 15 // PING	[https://tools.ietf.org/html/draft-fwmiller-ping-03]
+)
+
+func (sm SIPMethod) String() string {
+	switch sm {
+	default:
+		return "Unknown method"
+	case SIPMethodInvite:
+		return "INVITE"
+	case SIPMethodAck:
+		return "ACK"
+	case SIPMethodBye:
+		return "BYE"
+	case SIPMethodCancel:
+		return "CANCEL"
+	case SIPMethodOptions:
+		return "OPTIONS"
+	case SIPMethodRegister:
+		return "REGISTER"
+	case SIPMethodPrack:
+		return "PRACK"
+	case SIPMethodSubscribe:
+		return "SUBSCRIBE"
+	case SIPMethodNotify:
+		return "NOTIFY"
+	case SIPMethodPublish:
+		return "PUBLISH"
+	case SIPMethodInfo:
+		return "INFO"
+	case SIPMethodRefer:
+		return "REFER"
+	case SIPMethodMessage:
+		return "MESSAGE"
+	case SIPMethodUpdate:
+		return "UPDATE"
+	case SIPMethodPing:
+		return "PING"
+	}
+}
+
+// GetSIPMethod returns the constant of a SIP method
+// from its string
+func GetSIPMethod(method string) (SIPMethod, error) {
+	switch strings.ToUpper(method) {
+	case "INVITE":
+		return SIPMethodInvite, nil
+	case "ACK":
+		return SIPMethodAck, nil
+	case "BYE":
+		return SIPMethodBye, nil
+	case "CANCEL":
+		return SIPMethodCancel, nil
+	case "OPTIONS":
+		return SIPMethodOptions, nil
+	case "REGISTER":
+		return SIPMethodRegister, nil
+	case "PRACK":
+		return SIPMethodPrack, nil
+	case "SUBSCRIBE":
+		return SIPMethodSubscribe, nil
+	case "NOTIFY":
+		return SIPMethodNotify, nil
+	case "PUBLISH":
+		return SIPMethodPublish, nil
+	case "INFO":
+		return SIPMethodInfo, nil
+	case "REFER":
+		return SIPMethodRefer, nil
+	case "MESSAGE":
+		return SIPMethodMessage, nil
+	case "UPDATE":
+		return SIPMethodUpdate, nil
+	case "PING":
+		return SIPMethodPing, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP method: '%s'", method)
+	}
+}
+
+// Here is a correspondence between long header names and their compact forms,
+// as defined in RFC 3261, section 20.
+var compactSipHeadersCorrespondance = map[string]string{
+	"accept-contact":      "a",
+	"allow-events":        "u",
+	"call-id":             "i",
+	"contact":             "m",
+	"content-encoding":    "e",
+	"content-length":      "l",
+	"content-type":        "c",
+	"event":               "o",
+	"from":                "f",
+	"identity":            "y",
+	"refer-to":            "r",
+	"referred-by":         "b",
+	"reject-contact":      "j",
+	"request-disposition": "d",
+	"session-expires":     "x",
+	"subject":             "s",
+	"supported":           "k",
+	"to":                  "t",
+	"via":                 "v",
+}
+
+// SIP object contains information about a decoded SIP packet:
+// -> The SIP Version
+// -> The SIP Headers (in a map[string][]string because multiple headers can share the same name)
+// -> The SIP Method
+// -> The SIP Response code (if it's a response)
+// -> The SIP Status line (if it's a response)
+// You can easily tell the type of the packet with the IsResponse boolean.
+//
+type SIP struct {
+	BaseLayer
+
+	// Base information
+	Version SIPVersion
+	Method  SIPMethod
+	Headers map[string][]string
+
+	// Request
+	RequestURI string
+
+	// Response
+	IsResponse     bool
+	ResponseCode   int
+	ResponseStatus string
+
+	// Private fields
+	cseq             int64
+	contentLength    int64
+	lastHeaderParsed string
+}
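+
+// A minimal usage sketch (assumes payload already holds the raw bytes of a
+// single SIP message, e.g. a captured UDP payload):
+//
+//	s := NewSIP()
+//	if err := s.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err == nil {
+//		if s.IsResponse {
+//			fmt.Println(s.ResponseCode, s.ResponseStatus)
+//		} else {
+//			fmt.Println(s.Method, s.RequestURI, s.GetFirstHeader("Call-ID"))
+//		}
+//	}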
+
+// decodeSIP decodes the byte slice into a SIP type. It also
+// sets up the application layer in PacketBuilder.
+func decodeSIP(data []byte, p gopacket.PacketBuilder) error {
+	s := NewSIP()
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	p.SetApplicationLayer(s)
+	return nil
+}
+
+// NewSIP instantiates a new empty SIP object
+func NewSIP() *SIP {
+	s := new(SIP)
+	s.Headers = make(map[string][]string)
+	return s
+}
+
+// LayerType returns gopacket.LayerTypeSIP.
+func (s *SIP) LayerType() gopacket.LayerType {
+	return LayerTypeSIP
+}
+
+// Payload returns the base layer payload
+func (s *SIP) Payload() []byte {
+	return s.BaseLayer.Payload
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode
+func (s *SIP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer
+func (s *SIP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the slice into the SIP struct.
+func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// Init some vars for parsing follow-up
+	var countLines int
+	var line []byte
+	var err error
+
+	// Clean leading new line
+	data = bytes.Trim(data, "\n")
+
+	// Iterate over all lines of the SIP headers and stop when we reach
+	// the body (typically SDP), i.e. when an empty line is found and the
+	// remainder of the packet becomes the payload.
+	buffer := bytes.NewBuffer(data)
+
+	for {
+
+		// Read next line
+		line, err = buffer.ReadBytes(byte('\n'))
+		if err != nil {
+			if err == io.EOF {
+				break
+			} else {
+				return err
+			}
+		}
+
+		// Trim the new line delimiters
+		line = bytes.Trim(line, "\r\n")
+
+		// Empty line, we hit the body.
+		// Put the packet remainder in Payload.
+		if len(line) == 0 {
+			s.BaseLayer.Payload = buffer.Bytes()
+			break
+		}
+
+		// First line is the SIP request/response line
+		// Other lines are headers
+		if countLines == 0 {
+			err = s.ParseFirstLine(line)
+			if err != nil {
+				return err
+			}
+
+		} else {
+			err = s.ParseHeader(line)
+			if err != nil {
+				return err
+			}
+		}
+
+		countLines++
+	}
+
+	return nil
+}
+
+// ParseFirstLine parses the first line of a SIP packet.
+// The first line tells us whether it's a request or a response.
+//
+// Examples of the first line of the SIP protocol:
+//
+// 	Request 	: INVITE bob@example.com SIP/2.0
+// 	Response 	: SIP/2.0 200 OK
+// 	Response	: SIP/2.0 501 Not Implemented
+//
+func (s *SIP) ParseFirstLine(firstLine []byte) error {
+
+	var err error
+
+	// Splits line by space
+	splits := strings.SplitN(string(firstLine), " ", 3)
+
+	// We must have at least 3 parts
+	if len(splits) < 3 {
+		return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine))
+	}
+
+	// Determine the SIP packet type
+	if strings.HasPrefix(splits[0], "SIP") {
+
+		// --> Response
+		s.IsResponse = true
+
+		// Validate SIP Version
+		s.Version, err = GetSIPVersion(splits[0])
+		if err != nil {
+			return err
+		}
+
+		// Compute code
+		s.ResponseCode, err = strconv.Atoi(splits[1])
+		if err != nil {
+			return err
+		}
+
+		// Compute status line
+		s.ResponseStatus = splits[2]
+
+	} else {
+
+		// --> Request
+
+		// Validate method
+		s.Method, err = GetSIPMethod(splits[0])
+		if err != nil {
+			return err
+		}
+
+		s.RequestURI = splits[1]
+
+		// Validate SIP Version
+		s.Version, err = GetSIPVersion(splits[2])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ParseHeader will parse a SIP header.
+// SIP headers are quite simple: a colon-separated name and value.
+// Headers can be spread over multiple lines.
+//
+// Examples of headers:
+//
+//  CSeq: 1 REGISTER
+//  Via: SIP/2.0/UDP there.com:5060
+//  Authorization:Digest username="UserB",
+//	  realm="MCI WorldCom SIP",
+//    nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="",
+//    uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824"
+//
+func (s *SIP) ParseHeader(header []byte) (err error) {
+
+	// Ignore empty headers
+	if len(header) == 0 {
+		return
+	}
+
+	// Check if this is a continuation of the last header.
+	// RFC 3261 - 7.3.1 - Header Field Format specifies that continuation lines
+	// of multiline headers must begin with SP or TAB.
+	if header[0] == '\t' || header[0] == ' ' {
+
+		header = bytes.TrimSpace(header)
+		s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header))
+		return
+	}
+
+	// Find the ':' to separate header name and value
+	index := bytes.Index(header, []byte(":"))
+	if index >= 0 {
+
+		headerName := strings.ToLower(string(bytes.Trim(header[:index], " ")))
+		headerValue := string(bytes.Trim(header[index+1:], " "))
+
+		// Add header to object
+		s.Headers[headerName] = append(s.Headers[headerName], headerValue)
+		s.lastHeaderParsed = headerName
+
+		// Compute specific headers
+		err = s.ParseSpecificHeaders(headerName, headerValue)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ParseSpecificHeaders parses integer values from specific headers
+// such as CSeq or Content-Length.
+func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) {
+
+	switch headerName {
+	case "cseq":
+
+		// The CSeq header value is formatted like this:
+		// CSeq: 123 INVITE
+		// We split the value to parse the CSeq integer value and the method.
+		splits := strings.Split(headerValue, " ")
+		if len(splits) > 1 {
+
+			// Parse Cseq
+			s.cseq, err = strconv.ParseInt(splits[0], 10, 64)
+			if err != nil {
+				return err
+			}
+
+			// Validate method
+			if s.IsResponse {
+				s.Method, err = GetSIPMethod(splits[1])
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+	case "content-length":
+
+		// Parse Content-Length
+		s.contentLength, err = strconv.ParseInt(headerValue, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetAllHeaders will return the full headers of the
+// current SIP packet in a map[string][]string
+func (s *SIP) GetAllHeaders() map[string][]string {
+	return s.Headers
+}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+	headerName = strings.ToLower(headerName)
+	h := make([]string, 0)
+	if _, ok := s.Headers[headerName]; ok {
+		if len(s.Headers[headerName]) > 0 {
+			return s.Headers[headerName]
+		} else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+			return s.Headers[compactSipHeadersCorrespondance[headerName]]
+		}
+	}
+	return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+	headerName = strings.ToLower(headerName)
+	if _, ok := s.Headers[headerName]; ok {
+		if len(s.Headers[headerName]) > 0 {
+			return s.Headers[headerName][0]
+		} else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+			return s.Headers[compactSipHeadersCorrespondance[headerName]][0]
+		}
+	}
+	return ""
+}
+
+//
+// Some handy getters for most used SIP headers
+//
+
+// GetAuthorization will return the Authorization
+// header of the current SIP packet
+func (s *SIP) GetAuthorization() string {
+	return s.GetFirstHeader("Authorization")
+}
+
+// GetFrom will return the From
+// header of the current SIP packet
+func (s *SIP) GetFrom() string {
+	return s.GetFirstHeader("From")
+}
+
+// GetTo will return the To
+// header of the current SIP packet
+func (s *SIP) GetTo() string {
+	return s.GetFirstHeader("To")
+}
+
+// GetContact will return the Contact
+// header of the current SIP packet
+func (s *SIP) GetContact() string {
+	return s.GetFirstHeader("Contact")
+}
+
+// GetCallID will return the Call-ID
+// header of the current SIP packet
+func (s *SIP) GetCallID() string {
+	return s.GetFirstHeader("Call-ID")
+}
+
+// GetUserAgent will return the User-Agent
+// header of the current SIP packet
+func (s *SIP) GetUserAgent() string {
+	return s.GetFirstHeader("User-Agent")
+}
+
+// GetContentLength will return the parsed integer
+// Content-Length header of the current SIP packet
+func (s *SIP) GetContentLength() int64 {
+	return s.contentLength
+}
+
+// GetCSeq will return the parsed integer CSeq header
+// header of the current SIP packet
+func (s *SIP) GetCSeq() int64 {
+	return s.cseq
+}
diff --git a/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/google/gopacket/layers/stp.go
new file mode 100644
index 0000000..bde7d7c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/stp.go
@@ -0,0 +1,27 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// STP decodes Spanning Tree Protocol packets, which transport BPDU (bridge protocol data unit) messages.
+type STP struct {
+	BaseLayer
+}
+
+// LayerType returns gopacket.LayerTypeSTP.
+func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+	stp := &STP{}
+	stp.Contents = data[:]
+	// TODO:  parse the STP protocol into actual subfields.
+	p.AddLayer(stp)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/google/gopacket/layers/tcp.go
new file mode 100644
index 0000000..6b37f56
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcp.go
@@ -0,0 +1,337 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// TCP is the layer for TCP headers.
+type TCP struct {
+	BaseLayer
+	SrcPort, DstPort                           TCPPort
+	Seq                                        uint32
+	Ack                                        uint32
+	DataOffset                                 uint8
+	FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool
+	Window                                     uint16
+	Checksum                                   uint16
+	Urgent                                     uint16
+	sPort, dPort                               []byte
+	Options                                    []TCPOption
+	Padding                                    []byte
+	opts                                       [4]TCPOption
+	tcpipchecksum
+}
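+
+// A minimal construction sketch (illustrative values; checksum wiring against
+// a network layer is done via SetNetworkLayerForChecksum):
+//
+//	tcp := &TCP{SrcPort: 12345, DstPort: 80, Seq: 1, SYN: true, Window: 14600}
+//	tcp.Options = append(tcp.Options, TCPOption{
+//		OptionType: TCPOptionKindMSS,
+//		OptionData: []byte{0x05, 0xb4}, // MSS of 1460; length is fixed up on serialize
+//	})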
+
+// TCPOptionKind represents a TCP option code.
+type TCPOptionKind uint8
+
+const (
+	TCPOptionKindEndList                         = 0
+	TCPOptionKindNop                             = 1
+	TCPOptionKindMSS                             = 2  // len = 4
+	TCPOptionKindWindowScale                     = 3  // len = 3
+	TCPOptionKindSACKPermitted                   = 4  // len = 2
+	TCPOptionKindSACK                            = 5  // len = n
+	TCPOptionKindEcho                            = 6  // len = 6, obsolete
+	TCPOptionKindEchoReply                       = 7  // len = 6, obsolete
+	TCPOptionKindTimestamps                      = 8  // len = 10
+	TCPOptionKindPartialOrderConnectionPermitted = 9  // len = 2, obsolete
+	TCPOptionKindPartialOrderServiceProfile      = 10 // len = 3, obsolete
+	TCPOptionKindCC                              = 11 // obsolete
+	TCPOptionKindCCNew                           = 12 // obsolete
+	TCPOptionKindCCEcho                          = 13 // obsolete
+	TCPOptionKindAltChecksum                     = 14 // len = 3, obsolete
+	TCPOptionKindAltChecksumData                 = 15 // len = n, obsolete
+)
+
+func (k TCPOptionKind) String() string {
+	switch k {
+	case TCPOptionKindEndList:
+		return "EndList"
+	case TCPOptionKindNop:
+		return "NOP"
+	case TCPOptionKindMSS:
+		return "MSS"
+	case TCPOptionKindWindowScale:
+		return "WindowScale"
+	case TCPOptionKindSACKPermitted:
+		return "SACKPermitted"
+	case TCPOptionKindSACK:
+		return "SACK"
+	case TCPOptionKindEcho:
+		return "Echo"
+	case TCPOptionKindEchoReply:
+		return "EchoReply"
+	case TCPOptionKindTimestamps:
+		return "Timestamps"
+	case TCPOptionKindPartialOrderConnectionPermitted:
+		return "PartialOrderConnectionPermitted"
+	case TCPOptionKindPartialOrderServiceProfile:
+		return "PartialOrderServiceProfile"
+	case TCPOptionKindCC:
+		return "CC"
+	case TCPOptionKindCCNew:
+		return "CCNew"
+	case TCPOptionKindCCEcho:
+		return "CCEcho"
+	case TCPOptionKindAltChecksum:
+		return "AltChecksum"
+	case TCPOptionKindAltChecksumData:
+		return "AltChecksumData"
+	default:
+		return fmt.Sprintf("Unknown(%d)", k)
+	}
+}
+
+type TCPOption struct {
+	OptionType   TCPOptionKind
+	OptionLength uint8
+	OptionData   []byte
+}
+
+func (t TCPOption) String() string {
+	hd := hex.EncodeToString(t.OptionData)
+	if len(hd) > 0 {
+		hd = " 0x" + hd
+	}
+	switch t.OptionType {
+	case TCPOptionKindMSS:
+		return fmt.Sprintf("TCPOption(%s:%v%s)",
+			t.OptionType,
+			binary.BigEndian.Uint16(t.OptionData),
+			hd)
+
+	case TCPOptionKindTimestamps:
+		if len(t.OptionData) == 8 {
+			return fmt.Sprintf("TCPOption(%s:%v/%v%s)",
+				t.OptionType,
+				binary.BigEndian.Uint32(t.OptionData[:4]),
+				binary.BigEndian.Uint32(t.OptionData[4:8]),
+				hd)
+		}
+	}
+	return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd)
+}
+
+// LayerType returns gopacket.LayerTypeTCP
+func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP }
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var optionLength int
+	for _, o := range t.Options {
+		switch o.OptionType {
+		case 0, 1:
+			optionLength += 1
+		default:
+			optionLength += 2 + len(o.OptionData)
+		}
+	}
+	if opts.FixLengths {
+		if rem := optionLength % 4; rem != 0 {
+			t.Padding = lotsOfZeros[:4-rem]
+		}
+		t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4)
+	}
+	bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding))
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort))
+	binary.BigEndian.PutUint32(bytes[4:], t.Seq)
+	binary.BigEndian.PutUint32(bytes[8:], t.Ack)
+	binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset())
+	binary.BigEndian.PutUint16(bytes[14:], t.Window)
+	binary.BigEndian.PutUint16(bytes[18:], t.Urgent)
+	start := 20
+	for _, o := range t.Options {
+		bytes[start] = byte(o.OptionType)
+		switch o.OptionType {
+		case 0, 1:
+			start++
+		default:
+			if opts.FixLengths {
+				o.OptionLength = uint8(len(o.OptionData) + 2)
+			}
+			bytes[start+1] = o.OptionLength
+			copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData)
+			start += len(o.OptionData) + 2
+		}
+	}
+	copy(bytes[start:], t.Padding)
+	if opts.ComputeChecksums {
+		// zero out checksum bytes in current serialization.
+		bytes[16] = 0
+		bytes[17] = 0
+		csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP)
+		if err != nil {
+			return err
+		}
+		t.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[16:], t.Checksum)
+	return nil
+}
+
+func (t *TCP) ComputeChecksum() (uint16, error) {
+	return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP)
+}
+
+func (t *TCP) flagsAndOffset() uint16 {
+	f := uint16(t.DataOffset) << 12
+	if t.FIN {
+		f |= 0x0001
+	}
+	if t.SYN {
+		f |= 0x0002
+	}
+	if t.RST {
+		f |= 0x0004
+	}
+	if t.PSH {
+		f |= 0x0008
+	}
+	if t.ACK {
+		f |= 0x0010
+	}
+	if t.URG {
+		f |= 0x0020
+	}
+	if t.ECE {
+		f |= 0x0040
+	}
+	if t.CWR {
+		f |= 0x0080
+	}
+	if t.NS {
+		f |= 0x0100
+	}
+	return f
+}
+
+func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data))
+	}
+	tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2]))
+	tcp.sPort = data[0:2]
+	tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4]))
+	tcp.dPort = data[2:4]
+	tcp.Seq = binary.BigEndian.Uint32(data[4:8])
+	tcp.Ack = binary.BigEndian.Uint32(data[8:12])
+	tcp.DataOffset = data[12] >> 4
+	tcp.FIN = data[13]&0x01 != 0
+	tcp.SYN = data[13]&0x02 != 0
+	tcp.RST = data[13]&0x04 != 0
+	tcp.PSH = data[13]&0x08 != 0
+	tcp.ACK = data[13]&0x10 != 0
+	tcp.URG = data[13]&0x20 != 0
+	tcp.ECE = data[13]&0x40 != 0
+	tcp.CWR = data[13]&0x80 != 0
+	tcp.NS = data[12]&0x01 != 0
+	tcp.Window = binary.BigEndian.Uint16(data[14:16])
+	tcp.Checksum = binary.BigEndian.Uint16(data[16:18])
+	tcp.Urgent = binary.BigEndian.Uint16(data[18:20])
+	if tcp.Options == nil {
+		// Pre-allocate to avoid allocating a slice.
+		tcp.Options = tcp.opts[:0]
+	} else {
+		tcp.Options = tcp.Options[:0]
+	}
+	if tcp.DataOffset < 5 {
+		return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset)
+	}
+	dataStart := int(tcp.DataOffset) * 4
+	if dataStart > len(data) {
+		df.SetTruncated()
+		tcp.Payload = nil
+		tcp.Contents = data
+		return errors.New("TCP data offset greater than packet length")
+	}
+	tcp.Contents = data[:dataStart]
+	tcp.Payload = data[dataStart:]
+	// From here on, data points just to the header options.
+	data = data[20:dataStart]
+	for len(data) > 0 {
+		tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])})
+		opt := &tcp.Options[len(tcp.Options)-1]
+		switch opt.OptionType {
+		case TCPOptionKindEndList: // End of options
+			opt.OptionLength = 1
+			tcp.Padding = data[1:]
+			// The end-of-options marker terminates the option list; the
+			// remaining bytes are padding, so stop parsing here (a plain
+			// break would only exit the switch, not the loop).
+			return nil
+		case TCPOptionKindNop: // 1 byte padding
+			opt.OptionLength = 1
+		default:
+			if len(data) < 2 {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data))
+			}
+			opt.OptionLength = data[1]
+			if opt.OptionLength < 2 {
+				return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength)
+			} else if int(opt.OptionLength) > len(data) {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data))
+			}
+			opt.OptionData = data[2:opt.OptionLength]
+		}
+		data = data[opt.OptionLength:]
+	}
+	return nil
+}
+
+func (t *TCP) CanDecode() gopacket.LayerClass {
+	return LayerTypeTCP
+}
+
+func (t *TCP) NextLayerType() gopacket.LayerType {
+	lt := t.DstPort.LayerType()
+	if lt == gopacket.LayerTypePayload {
+		lt = t.SrcPort.LayerType()
+	}
+	return lt
+}
+
+func decodeTCP(data []byte, p gopacket.PacketBuilder) error {
+	tcp := &TCP{}
+	err := tcp.DecodeFromBytes(data, p)
+	p.AddLayer(tcp)
+	p.SetTransportLayer(tcp)
+	if err != nil {
+		return err
+	}
+	if p.DecodeOptions().DecodeStreamsAsDatagrams {
+		return p.NextDecoder(tcp.NextLayerType())
+	} else {
+		return p.NextDecoder(gopacket.LayerTypePayload)
+	}
+}
+
+func (t *TCP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort)
+}
+
+// For testing only
+func (t *TCP) SetInternalPortsForTesting() {
+	t.sPort = make([]byte, 2)
+	t.dPort = make([]byte, 2)
+	binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort))
+	binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/google/gopacket/layers/tcpip.go
new file mode 100644
index 0000000..64ba51c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcpip.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// Checksum computation for TCP/UDP.
+type tcpipchecksum struct {
+	pseudoheader tcpipPseudoHeader
+}
+
+type tcpipPseudoHeader interface {
+	pseudoheaderChecksum() (uint32, error)
+}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo4(); err != nil {
+		return 0, err
+	}
+	csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+	csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+	csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+	csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+	return csum, nil
+}
+
+func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo16(); err != nil {
+		return 0, err
+	}
+	for i := 0; i < 16; i += 2 {
+		csum += uint32(ip.SrcIP[i]) << 8
+		csum += uint32(ip.SrcIP[i+1])
+		csum += uint32(ip.DstIP[i]) << 8
+		csum += uint32(ip.DstIP[i+1])
+	}
+	return csum, nil
+}
+
+// Calculate the TCP/IP checksum defined in RFC 1071.  The passed-in csum is any
+// initial checksum data that's already been computed.
+func tcpipChecksum(data []byte, csum uint32) uint16 {
+	// to handle odd lengths, we loop to length - 1, incrementing by 2, then
+	// handle the last byte specifically by checking against the original
+	// length.
+	length := len(data) - 1
+	for i := 0; i < length; i += 2 {
+		// For our test packet, doing this manually is about 25% faster
+		// (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16.
+		csum += uint32(data[i]) << 8
+		csum += uint32(data[i+1])
+	}
+	if len(data)%2 == 1 {
+		csum += uint32(data[length]) << 8
+	}
+	for csum > 0xffff {
+		csum = (csum >> 16) + (csum & 0xffff)
+	}
+	return ^uint16(csum)
+}
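+
+// For instance (illustrative): tcpipChecksum([]byte{0x45, 0x00, 0x00, 0x73}, 0)
+// sums the 16-bit words 0x4500 + 0x0073 = 0x4573 and returns the one's
+// complement, 0xba8c.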
+
+// computeChecksum computes a TCP or UDP checksum.  headerAndPayload is the
+// serialized TCP or UDP header plus its payload, with the checksum zero'd
+// out. headerProtocol is the IP protocol number of the upper-layer header.
+func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
+	if c.pseudoheader == nil {
+		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
+	}
+	length := uint32(len(headerAndPayload))
+	csum, err := c.pseudoheader.pseudoheaderChecksum()
+	if err != nil {
+		return 0, err
+	}
+	csum += uint32(headerProtocol)
+	csum += length & 0xffff
+	csum += length >> 16
+	return tcpipChecksum(headerAndPayload, csum), nil
+}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depend on fields in the IPv4 or IPv6 layer that contains it.
+// The passed-in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+	switch v := l.(type) {
+	case *IPv4:
+		i.pseudoheader = v
+	case *IPv6:
+		i.pseudoheader = v
+	default:
+		return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+	}
+	return nil
+}
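+
+// A minimal serialization sketch (illustrative addresses and ports; assumes the
+// usual gopacket serialization flow):
+//
+//	ip := &IPv4{Version: 4, TTL: 64, Protocol: IPProtocolTCP,
+//		SrcIP: net.IP{10, 0, 0, 1}, DstIP: net.IP{10, 0, 0, 2}}
+//	tcp := &TCP{SrcPort: 12345, DstPort: 80, SYN: true}
+//	tcp.SetNetworkLayerForChecksum(ip)
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	gopacket.SerializeLayers(buf, opts, ip, tcp)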
diff --git a/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/google/gopacket/layers/test_creator.py
new file mode 100644
index 0000000..c92d276
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+
+"""TestCreator creates test templates from pcap files."""
+
+import argparse
+import base64
+import glob
+import re
+import string
+import subprocess
+import sys
+
+
+class Packet(object):
+  """Helper class encapsulating packet from a pcap file."""
+
+  def __init__(self, packet_lines):
+    self.packet_lines = packet_lines
+    self.data = self._DecodeText(packet_lines)
+
+  @classmethod
+  def _DecodeText(cls, packet_lines):
+    packet_bytes = []
+    # First line is timestamp and stuff, skip it.
+    # Format: 0x0010:  0000 0020 3aff 3ffe 0000 0000 0000 0000  ....:.?.........
+
+    for line in packet_lines[1:]:
+      m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE)
+      if m is None: continue
+      for hexpart in m.group(1).split():
+        packet_bytes.append(base64.b16decode(hexpart.upper()))
+    return ''.join(packet_bytes)
+
+  def Test(self, name, link_type):
+    """Yields a test using this packet, as a set of lines."""
+    yield '// testPacket%s is the packet:' % name
+    for line in self.packet_lines:
+      yield '//   ' + line
+    yield 'var testPacket%s = []byte{' % name
+    data = list(self.data)
+    while data:
+      linebytes, data = data[:16], data[16:]
+      yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes])
+    yield '}'
+    yield 'func TestPacket%s(t *testing.T) {' % name
+    yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type)
+    yield '\tif p.ErrorLayer() != nil {'
+    yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())'
+    yield '\t}'
+    yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type
+    yield '}'
+    yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name
+    yield '\tfor i := 0; i < b.N; i++ {'
+    yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type)
+    yield '\t}'
+    yield '}'
+
+
+
+def GetTcpdumpOutput(filename):
+  """Runs tcpdump on the given file, returning output as string."""
+  return subprocess.check_output(
+      ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename])
+
+
+def TcpdumpOutputToPackets(output):
+  """Reads a pcap file with TCPDump, yielding Packet objects."""
+  pdata = []
+  for line in output.splitlines():
+    if line[0] not in string.whitespace and pdata:
+      yield Packet(pdata)
+      pdata = []
+    pdata.append(line)
+  if pdata:
+    yield Packet(pdata)
+
+
+def main():
+  class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+    def _format_usage(self, usage, actions, groups, prefix=None):
+      header =('TestCreator creates gopacket tests using a pcap file.\n\n'
+               'Tests are written to standard out... they can then be\n'
+               'copied into the file of your choice and modified as\n'
+               'you see fit.\n\n')
+      return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
+        self, usage, actions, groups, prefix)
+
+  parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
+  parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
+  parser.add_argument('--name', default='Packet%d', help='the test name template, must have "%%d" inside it')
+  parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')
+
+  args = parser.parse_args()
+
+  for arg in args.files:
+    for path in glob.glob(arg):
+      for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
+        print '\n'.join(packet.Test(
+          args.name % i, args.link_type))
+
+if __name__ == '__main__':
+    main()
diff --git a/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/google/gopacket/layers/tls.go
new file mode 100644
index 0000000..ddb6ff9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls.go
@@ -0,0 +1,208 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+	TLSChangeCipherSpec TLSType = 20
+	TLSAlert            TLSType = 21
+	TLSHandshake        TLSType = 22
+	TLSApplicationData  TLSType = 23
+	TLSUnknown          TLSType = 255
+)
+
+// String shows the record type nicely formatted
+func (tt TLSType) String() string {
+	switch tt {
+	default:
+		return "Unknown"
+	case TLSChangeCipherSpec:
+		return "Change Cipher Spec"
+	case TLSAlert:
+		return "Alert"
+	case TLSHandshake:
+		return "Handshake"
+	case TLSApplicationData:
+		return "Application Data"
+	}
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+	switch tv {
+	default:
+		return "Unknown"
+	case 0x0200:
+		return "SSL 2.0"
+	case 0x0300:
+		return "SSL 3.0"
+	case 0x0301:
+		return "TLS 1.0"
+	case 0x0302:
+		return "TLS 1.1"
+	case 0x0303:
+		return "TLS 1.2"
+	case 0x0304:
+		return "TLS 1.3"
+	}
+}
+
+// TLS is specified in RFC 5246
+//
+//  TLS Record Protocol
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |     Content Type      |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (major)    |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (minor)    |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLS record structures
+type TLS struct {
+	BaseLayer
+
+	// TLS Records
+	ChangeCipherSpec []TLSChangeCipherSpecRecord
+	Handshake        []TLSHandshakeRecord
+	AppData          []TLSAppDataRecord
+	Alert            []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS record type should have
+type TLSRecordHeader struct {
+	ContentType TLSType
+	Version     TLSVersion
+	Length      uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application layer in PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error {
+	t := &TLS{}
+	err := t.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(t)
+	p.SetApplicationLayer(t)
+	return nil
+}
+
+// DecodeFromBytes decodes the slice into the TLS struct.
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	t.BaseLayer.Contents = data
+	t.BaseLayer.Payload = nil
+
+	t.ChangeCipherSpec = t.ChangeCipherSpec[:0]
+	t.Handshake = t.Handshake[:0]
+	t.AppData = t.AppData[:0]
+	t.Alert = t.Alert[:0]
+
+	return t.decodeTLSRecords(data, df)
+}
+
+func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 5 {
+		df.SetTruncated()
+		return errors.New("TLS record too short")
+	}
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	t.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	var h TLSRecordHeader
+	h.ContentType = TLSType(data[0])
+	h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3]))
+	h.Length = binary.BigEndian.Uint16(data[3:5])
+
+	if h.ContentType.String() == "Unknown" {
+		return errors.New("Unknown TLS record type")
+	}
+
+	hl := 5 // header length
+	tl := hl + int(h.Length)
+	if len(data) < tl {
+		df.SetTruncated()
+		return errors.New("TLS packet length mismatch")
+	}
+
+	switch h.ContentType {
+	default:
+		return errors.New("Unknown TLS record type")
+	case TLSChangeCipherSpec:
+		var r TLSChangeCipherSpecRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.ChangeCipherSpec = append(t.ChangeCipherSpec, r)
+	case TLSAlert:
+		var r TLSAlertRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.Alert = append(t.Alert, r)
+	case TLSHandshake:
+		var r TLSHandshakeRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.Handshake = append(t.Handshake, r)
+	case TLSApplicationData:
+		var r TLSAppDataRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.AppData = append(t.AppData, r)
+	}
+
+	if len(data) == tl {
+		return nil
+	}
+	return t.decodeTLSRecords(data[tl:len(data)], df)
+}
+
+// CanDecode implements gopacket.DecodingLayer.
+func (t *TLS) CanDecode() gopacket.LayerClass {
+	return LayerTypeTLS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (t *TLS) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord
+func (t *TLS) Payload() []byte {
+	return nil
+}
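+
+// A minimal usage sketch (assumes tcpPayload holds one or more complete TLS
+// records, e.g. taken from a reassembled TCP stream):
+//
+//	var tls TLS
+//	if err := tls.DecodeFromBytes(tcpPayload, gopacket.NilDecodeFeedback); err == nil {
+//		for _, hs := range tls.Handshake {
+//			fmt.Println(hs.ContentType, hs.Version, hs.Length)
+//		}
+//	}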
diff --git a/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/google/gopacket/layers/tls_alert.go
new file mode 100644
index 0000000..0c5aee0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_alert.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+	TLSAlertWarning      TLSAlertLevel = 1
+	TLSAlertFatal        TLSAlertLevel = 2
+	TLSAlertUnknownLevel TLSAlertLevel = 255
+
+	TLSAlertCloseNotify               TLSAlertDescr = 0
+	TLSAlertUnexpectedMessage         TLSAlertDescr = 10
+	TLSAlertBadRecordMac              TLSAlertDescr = 20
+	TLSAlertDecryptionFailedRESERVED  TLSAlertDescr = 21
+	TLSAlertRecordOverflow            TLSAlertDescr = 22
+	TLSAlertDecompressionFailure      TLSAlertDescr = 30
+	TLSAlertHandshakeFailure          TLSAlertDescr = 40
+	TLSAlertNoCertificateRESERVED     TLSAlertDescr = 41
+	TLSAlertBadCertificate            TLSAlertDescr = 42
+	TLSAlertUnsupportedCertificate    TLSAlertDescr = 43
+	TLSAlertCertificateRevoked        TLSAlertDescr = 44
+	TLSAlertCertificateExpired        TLSAlertDescr = 45
+	TLSAlertCertificateUnknown        TLSAlertDescr = 46
+	TLSAlertIllegalParameter          TLSAlertDescr = 47
+	TLSAlertUnknownCa                 TLSAlertDescr = 48
+	TLSAlertAccessDenied              TLSAlertDescr = 49
+	TLSAlertDecodeError               TLSAlertDescr = 50
+	TLSAlertDecryptError              TLSAlertDescr = 51
+	TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+	TLSAlertProtocolVersion           TLSAlertDescr = 70
+	TLSAlertInsufficientSecurity      TLSAlertDescr = 71
+	TLSAlertInternalError             TLSAlertDescr = 80
+	TLSAlertUserCanceled              TLSAlertDescr = 90
+	TLSAlertNoRenegotiation           TLSAlertDescr = 100
+	TLSAlertUnsupportedExtension      TLSAlertDescr = 110
+	TLSAlertUnknownDescription        TLSAlertDescr = 255
+)
+
+//  TLS Alert
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |         Level         |
+//  +--+--+--+--+--+--+--+--+
+//  |      Description      |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+	TLSRecordHeader
+
+	Level       TLSAlertLevel
+	Description TLSAlertDescr
+
+	EncryptedMsg []byte
+}
+
+// decodeFromBytes decodes the slice into a TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) < 2 {
+		df.SetTruncated()
+		return errors.New("TLS Alert packet too short")
+	}
+
+	if t.Length == 2 {
+		t.Level = TLSAlertLevel(data[0])
+		t.Description = TLSAlertDescr(data[1])
+	} else {
+		t.Level = TLSAlertUnknownLevel
+		t.Description = TLSAlertUnknownDescription
+		t.EncryptedMsg = data
+	}
+
+	return nil
+}
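+
+// A hedged illustration (not upstream code): a plaintext alert body is exactly
+// two bytes, level followed by description; anything else is kept verbatim in
+// EncryptedMsg.
+//
+//	var r TLSAlertRecord
+//	hdr := TLSRecordHeader{ContentType: TLSAlert, Length: 2}
+//	_ = r.decodeFromBytes(hdr, []byte{2, 40}, gopacket.NilDecodeFeedback)
+//	// r.Level is TLSAlertFatal, r.Description is TLSAlertHandshakeFailure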
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+	switch al {
+	default:
+		return fmt.Sprintf("Unknown(%d)", al)
+	case TLSAlertWarning:
+		return "Warning"
+	case TLSAlertFatal:
+		return "Fatal"
+	}
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+	switch ad {
+	default:
+		return "Unknown"
+	case TLSAlertCloseNotify:
+		return "close_notify"
+	case TLSAlertUnexpectedMessage:
+		return "unexpected_message"
+	case TLSAlertBadRecordMac:
+		return "bad_record_mac"
+	case TLSAlertDecryptionFailedRESERVED:
+		return "decryption_failed_RESERVED"
+	case TLSAlertRecordOverflow:
+		return "record_overflow"
+	case TLSAlertDecompressionFailure:
+		return "decompression_failure"
+	case TLSAlertHandshakeFailure:
+		return "handshake_failure"
+	case TLSAlertNoCertificateRESERVED:
+		return "no_certificate_RESERVED"
+	case TLSAlertBadCertificate:
+		return "bad_certificate"
+	case TLSAlertUnsupportedCertificate:
+		return "unsupported_certificate"
+	case TLSAlertCertificateRevoked:
+		return "certificate_revoked"
+	case TLSAlertCertificateExpired:
+		return "certificate_expired"
+	case TLSAlertCertificateUnknown:
+		return "certificate_unknown"
+	case TLSAlertIllegalParameter:
+		return "illegal_parameter"
+	case TLSAlertUnknownCa:
+		return "unknown_ca"
+	case TLSAlertAccessDenied:
+		return "access_denied"
+	case TLSAlertDecodeError:
+		return "decode_error"
+	case TLSAlertDecryptError:
+		return "decrypt_error"
+	case TLSAlertExportRestrictionRESERVED:
+		return "export_restriction_RESERVED"
+	case TLSAlertProtocolVersion:
+		return "protocol_version"
+	case TLSAlertInsufficientSecurity:
+		return "insufficient_security"
+	case TLSAlertInternalError:
+		return "internal_error"
+	case TLSAlertUserCanceled:
+		return "user_canceled"
+	case TLSAlertNoRenegotiation:
+		return "no_renegotiation"
+	case TLSAlertUnsupportedExtension:
+		return "unsupported_extension"
+	}
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/google/gopacket/layers/tls_appdata.go
new file mode 100644
index 0000000..dedd1d5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData Record type should have
+type TLSAppDataRecord struct {
+	TLSRecordHeader
+	Payload []byte
+}
+
+// decodeFromBytes decodes the slice into a TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != int(t.Length) {
+		return errors.New("TLS Application Data length mismatch")
+	}
+
+	t.Payload = data
+	return nil
+}
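+
+// A hedged illustration (not upstream code): the application data stays
+// opaque, so the decoder only checks that the slice length matches the record
+// header length before storing it.
+//
+//	var r TLSAppDataRecord
+//	hdr := TLSRecordHeader{ContentType: TLSApplicationData, Length: 3}
+//	_ = r.decodeFromBytes(hdr, []byte{0xde, 0xad, 0xbe}, gopacket.NilDecodeFeedback)
+//	// r.Payload now holds the three (still encrypted) bytes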
diff --git a/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 0000000..8f3dc62
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+	TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+	TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+//  TLS Change Cipher Spec
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |        Message        |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+	TLSRecordHeader
+
+	Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the slice into a TLSChangeCipherSpecRecord struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != 1 {
+		df.SetTruncated()
+		return errors.New("TLS Change Cipher Spec record incorrect length")
+	}
+
+	t.Message = TLSchangeCipherSpec(data[0])
+	if t.Message != TLSChangecipherspecMessage {
+		t.Message = TLSChangecipherspecUnknown
+	}
+
+	return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+	switch ccs {
+	default:
+		return "Unknown"
+	case TLSChangecipherspecMessage:
+		return "Change Cipher Spec Message"
+	}
+}
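+
+// A hedged illustration (not upstream code): the record body is a single byte
+// and any value other than 1 is mapped to TLSChangecipherspecUnknown.
+//
+//	var r TLSChangeCipherSpecRecord
+//	hdr := TLSRecordHeader{ContentType: TLSChangeCipherSpec, Length: 1}
+//	_ = r.decodeFromBytes(hdr, []byte{1}, gopacket.NilDecodeFeedback)
+//	// r.Message is TLSChangecipherspecMessage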
diff --git a/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/google/gopacket/layers/tls_handshake.go
new file mode 100644
index 0000000..e45e2c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_handshake.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+	TLSRecordHeader
+}
+
+// decodeFromBytes decodes the slice into a TLSHandshakeRecord struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	// TODO
+
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/google/gopacket/layers/udp.go
new file mode 100644
index 0000000..97e81c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udp.go
@@ -0,0 +1,133 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+	BaseLayer
+	SrcPort, DstPort UDPPort
+	Length           uint16
+	Checksum         uint16
+	sPort, dPort     []byte
+	tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+	}
+	udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+	udp.sPort = data[0:2]
+	udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+	udp.dPort = data[2:4]
+	udp.Length = binary.BigEndian.Uint16(data[4:6])
+	udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+	udp.BaseLayer = BaseLayer{Contents: data[:8]}
+	switch {
+	case udp.Length >= 8:
+		hlen := int(udp.Length)
+		if hlen > len(data) {
+			df.SetTruncated()
+			hlen = len(data)
+		}
+		udp.Payload = data[8:hlen]
+	case udp.Length == 0: // Jumbogram, use entire rest of data
+		udp.Payload = data[8:]
+	default:
+		return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var jumbo bool
+
+	payload := b.Bytes()
+	if _, ok := u.pseudoheader.(*IPv6); ok {
+		if len(payload)+8 > 65535 {
+			jumbo = true
+		}
+	}
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+	if opts.FixLengths {
+		if jumbo {
+			u.Length = 0
+		} else {
+			u.Length = uint16(len(payload)) + 8
+		}
+	}
+	binary.BigEndian.PutUint16(bytes[4:], u.Length)
+	if opts.ComputeChecksums {
+		// zero out checksum bytes
+		bytes[6] = 0
+		bytes[7] = 0
+		csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+		if err != nil {
+			return err
+		}
+		u.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+	return nil
+}
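+
+// A hedged usage sketch (not part of the upstream file): serializing a UDP
+// header on top of a payload, letting FixLengths and ComputeChecksums fill in
+// Length and Checksum. The network layer ip and the payload bytes are assumed
+// to be supplied by the caller; SetNetworkLayerForChecksum wires up the
+// pseudoheader used by computeChecksum.
+//
+//	udp := &UDP{SrcPort: 4789, DstPort: 4789}
+//	_ = udp.SetNetworkLayerForChecksum(ip)
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	_ = gopacket.SerializeLayers(buf, opts, ip, udp, gopacket.Payload(payload))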
+
+func (u *UDP) CanDecode() gopacket.LayerClass {
+	return LayerTypeUDP
+}
+
+// NextLayerType uses the destination port to select the
+// next decoder. It first tries to decode via the
+// destination port, then the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType {
+	if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload {
+		return lt
+	}
+	return u.SrcPort.LayerType()
+}
+
+func decodeUDP(data []byte, p gopacket.PacketBuilder) error {
+	udp := &UDP{}
+	err := udp.DecodeFromBytes(data, p)
+	p.AddLayer(udp)
+	p.SetTransportLayer(udp)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(udp.NextLayerType())
+}
+
+func (u *UDP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort)
+}
+
+// For testing only
+func (u *UDP) SetInternalPortsForTesting() {
+	u.sPort = make([]byte, 2)
+	u.dPort = make([]byte, 2)
+	binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort))
+	binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/google/gopacket/layers/udplite.go
new file mode 100644
index 0000000..7d84c51
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udplite.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// UDPLite is the layer for UDP-Lite headers (rfc 3828).
+type UDPLite struct {
+	BaseLayer
+	SrcPort, DstPort UDPLitePort
+	ChecksumCoverage uint16
+	Checksum         uint16
+	sPort, dPort     []byte
+}
+
+// LayerType returns gopacket.LayerTypeUDPLite
+func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite }
+
+func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error {
+	udp := &UDPLite{
+		SrcPort:          UDPLitePort(binary.BigEndian.Uint16(data[0:2])),
+		sPort:            data[0:2],
+		DstPort:          UDPLitePort(binary.BigEndian.Uint16(data[2:4])),
+		dPort:            data[2:4],
+		ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]),
+		Checksum:         binary.BigEndian.Uint16(data[6:8]),
+		BaseLayer:        BaseLayer{data[:8], data[8:]},
+	}
+	p.AddLayer(udp)
+	p.SetTransportLayer(udp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (u *UDPLite) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort)
+}
diff --git a/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/google/gopacket/layers/usb.go
new file mode 100644
index 0000000..0b4d4af
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/usb.go
@@ -0,0 +1,287 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+type USBEventType uint8
+
+const (
+	USBEventTypeSubmit   USBEventType = 'S'
+	USBEventTypeComplete USBEventType = 'C'
+	USBEventTypeError    USBEventType = 'E'
+)
+
+func (a USBEventType) String() string {
+	switch a {
+	case USBEventTypeSubmit:
+		return "SUBMIT"
+	case USBEventTypeComplete:
+		return "COMPLETE"
+	case USBEventTypeError:
+		return "ERROR"
+	default:
+		return "Unknown event type"
+	}
+}
+
+type USBRequestBlockSetupRequest uint8
+
+const (
+	USBRequestBlockSetupRequestGetStatus        USBRequestBlockSetupRequest = 0x00
+	USBRequestBlockSetupRequestClearFeature     USBRequestBlockSetupRequest = 0x01
+	USBRequestBlockSetupRequestSetFeature       USBRequestBlockSetupRequest = 0x03
+	USBRequestBlockSetupRequestSetAddress       USBRequestBlockSetupRequest = 0x05
+	USBRequestBlockSetupRequestGetDescriptor    USBRequestBlockSetupRequest = 0x06
+	USBRequestBlockSetupRequestSetDescriptor    USBRequestBlockSetupRequest = 0x07
+	USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08
+	USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09
+	USBRequestBlockSetupRequestSetIdle          USBRequestBlockSetupRequest = 0x0a
+)
+
+func (a USBRequestBlockSetupRequest) String() string {
+	switch a {
+	case USBRequestBlockSetupRequestGetStatus:
+		return "GET_STATUS"
+	case USBRequestBlockSetupRequestClearFeature:
+		return "CLEAR_FEATURE"
+	case USBRequestBlockSetupRequestSetFeature:
+		return "SET_FEATURE"
+	case USBRequestBlockSetupRequestSetAddress:
+		return "SET_ADDRESS"
+	case USBRequestBlockSetupRequestGetDescriptor:
+		return "GET_DESCRIPTOR"
+	case USBRequestBlockSetupRequestSetDescriptor:
+		return "SET_DESCRIPTOR"
+	case USBRequestBlockSetupRequestGetConfiguration:
+		return "GET_CONFIGURATION"
+	case USBRequestBlockSetupRequestSetConfiguration:
+		return "SET_CONFIGURATION"
+	case USBRequestBlockSetupRequestSetIdle:
+		return "SET_IDLE"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+type USBTransportType uint8
+
+const (
+	USBTransportTypeTransferIn  USBTransportType = 0x80 // Indicates send or receive
+	USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream.
+	USBTransportTypeInterrupt   USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards.
+	USBTransportTypeControl     USBTransportType = 0x02 // Control transfers are typically used for command and status operations.
+	USBTransportTypeBulk        USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers.
+)
+
+type USBDirectionType uint8
+
+const (
+	USBDirectionTypeUnknown USBDirectionType = iota
+	USBDirectionTypeIn
+	USBDirectionTypeOut
+)
+
+func (a USBDirectionType) String() string {
+	switch a {
+	case USBDirectionTypeIn:
+		return "In"
+	case USBDirectionTypeOut:
+		return "Out"
+	default:
+		return "Unknown direction type"
+	}
+}
+
+// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol.
+type USB struct {
+	BaseLayer
+	ID             uint64
+	EventType      USBEventType
+	TransferType   USBTransportType
+	Direction      USBDirectionType
+	EndpointNumber uint8
+	DeviceAddress  uint8
+	BusID          uint16
+	TimestampSec   int64
+	TimestampUsec  int32
+	Setup          bool
+	Data           bool
+	Status         int32
+	UrbLength      uint32
+	UrbDataLength  uint32
+
+	UrbInterval            uint32
+	UrbStartFrame          uint32
+	UrbCopyOfTransferFlags uint32
+	IsoNumDesc             uint32
+}
+
+func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB }
+
+func (m *USB) NextLayerType() gopacket.LayerType {
+	if m.Setup {
+		return LayerTypeUSBRequestBlockSetup
+	} else if m.Data {
+	}
+
+	return m.TransferType.LayerType()
+}
+
+func decodeUSB(data []byte, p gopacket.PacketBuilder) error {
+	d := &USB{}
+
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.ID = binary.LittleEndian.Uint64(data[0:8])
+	m.EventType = USBEventType(data[8])
+	m.TransferType = USBTransportType(data[9])
+
+	m.EndpointNumber = data[10] & 0x7f
+	if data[10]&uint8(USBTransportTypeTransferIn) > 0 {
+		m.Direction = USBDirectionTypeIn
+	} else {
+		m.Direction = USBDirectionTypeOut
+	}
+
+	m.DeviceAddress = data[11]
+	m.BusID = binary.LittleEndian.Uint16(data[12:14])
+
+	if uint(data[14]) == 0 {
+		m.Setup = true
+	}
+
+	if uint(data[15]) == 0 {
+		m.Data = true
+	}
+
+	m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24]))
+	m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28]))
+	m.Status = int32(binary.LittleEndian.Uint32(data[28:32]))
+	m.UrbLength = binary.LittleEndian.Uint32(data[32:36])
+	m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40])
+
+	m.Contents = data[:40]
+	m.Payload = data[40:]
+
+	if m.Setup {
+		m.Payload = data[40:]
+	} else if m.Data {
+		m.Payload = data[uint32(len(data))-m.UrbDataLength:]
+	}
+
+	// if 64 bit, dissect_linux_usb_pseudo_header_ext
+	if false {
+		m.UrbInterval = binary.LittleEndian.Uint32(data[40:44])
+		m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48])
+		m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52])
+		m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56])
+		m.Contents = data[:56]
+		m.Payload = data[56:]
+	}
+
+	// crc5 or crc16
+	// eop (end of packet)
+
+	return nil
+}
+
+type USBRequestBlockSetup struct {
+	BaseLayer
+	RequestType uint8
+	Request     USBRequestBlockSetupRequest
+	Value       uint16
+	Index       uint16
+	Length      uint16
+}
+
+func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup }
+
+func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.RequestType = data[0]
+	m.Request = USBRequestBlockSetupRequest(data[1])
+	m.Value = binary.LittleEndian.Uint16(data[2:4])
+	m.Index = binary.LittleEndian.Uint16(data[4:6])
+	m.Length = binary.LittleEndian.Uint16(data[6:8])
+	m.Contents = data[:8]
+	m.Payload = data[8:]
+	return nil
+}
+
+func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBRequestBlockSetup{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBControl struct {
+	BaseLayer
+}
+
+func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl }
+
+func (m *USBControl) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBControl{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBInterrupt struct {
+	BaseLayer
+}
+
+func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt }
+
+func (m *USBInterrupt) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBInterrupt{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBBulk struct {
+	BaseLayer
+}
+
+func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk }
+
+func (m *USBBulk) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBBulk{}
+	return decodingLayerDecoder(d, data, p)
+}
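+
+// A minimal sketch (not upstream code): raw is assumed to hold one Linux
+// usbmon capture record (at least the 40-byte URB header decoded above).
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeUSB, gopacket.Default)
+//	if l := packet.Layer(LayerTypeUSB); l != nil {
+//		usb := l.(*USB)
+//		fmt.Println(usb.EventType, usb.Direction, usb.EndpointNumber, usb.DeviceAddress)
+//	}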
diff --git a/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/google/gopacket/layers/vrrp.go
new file mode 100644
index 0000000..ffaafe6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vrrp.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+/*
+	This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2.
+	https://tools.ietf.org/html/rfc3768#section-5
+    0                   1                   2                   3
+    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |Version| Type  | Virtual Rtr ID|   Priority    | Count IP Addrs|
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |   Auth Type   |   Adver Int   |          Checksum             |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                         IP Address (1)                        |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                            .                                  |
+   |                            .                                  |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                         IP Address (n)                        |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                     Authentication Data (1)                   |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                     Authentication Data (2)                   |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+type VRRPv2Type uint8
+type VRRPv2AuthType uint8
+
+const (
+	VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement
+)
+
+// String conversions for VRRP message types
+func (v VRRPv2Type) String() string {
+	switch v {
+	case VRRPv2Advertisement:
+		return "VRRPv2 Advertisement"
+	default:
+		return ""
+	}
+}
+
+const (
+	VRRPv2AuthNoAuth    VRRPv2AuthType = 0x00 // No Authentication
+	VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1
+	VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2
+)
+
+func (v VRRPv2AuthType) String() string {
+	switch v {
+	case VRRPv2AuthNoAuth:
+		return "No Authentication"
+	case VRRPv2AuthReserved1:
+		return "Reserved"
+	case VRRPv2AuthReserved2:
+		return "Reserved"
+	default:
+		return ""
+	}
+}
+
+// VRRPv2 represents a VRRP v2 message.
+type VRRPv2 struct {
+	BaseLayer
+	Version      uint8          // The version field specifies the VRRP protocol version of this packet (v2)
+	Type         VRRPv2Type     // The type field specifies the type of this VRRP packet.  The only type defined in v2 is ADVERTISEMENT
+	VirtualRtrID uint8          // identifies the virtual router this packet is reporting status for
+	Priority     uint8          // specifies the sending VRRP router's priority for the virtual router (100 = default)
+	CountIPAddr  uint8          // The number of IP addresses contained in this VRRP advertisement.
+	AuthType     VRRPv2AuthType // identifies the authentication method being utilized
+	AdverInt     uint8          // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS.  The default is 1 second
+	Checksum     uint16         // used to detect data corruption in the VRRP message.
+	IPAddress    []net.IP       // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field.
+}
+
+// LayerType returns LayerTypeVRRP for VRRP v2 message.
+func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP }
+
+func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	v.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+	v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2
+
+	v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement)
+	if v.Type != 1 {
+		// rfc3768: A packet with unknown type MUST be discarded.
+		return errors.New("Unrecognized VRRPv2 type field.")
+	}
+
+	v.VirtualRtrID = data[1]
+	v.Priority = data[2]
+
+	v.CountIPAddr = data[3]
+	if v.CountIPAddr < 1 {
+		return errors.New("VRRPv2 number of IP addresses is not valid.")
+	}
+
+	v.AuthType = VRRPv2AuthType(data[4])
+	v.AdverInt = uint8(data[5])
+	v.Checksum = binary.BigEndian.Uint16(data[6:8])
+
+	// populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field
+	// offset references the starting byte containing the list of ip addresses
+	offset := 8
+	for i := uint8(0); i < v.CountIPAddr; i++ {
+		v.IPAddress = append(v.IPAddress, data[offset:offset+4])
+		offset += 4
+	}
+
+	//	any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC
+	//
+	//			5.3.10.  Authentication Data
+	//
+	//			The authentication string is currently only used to maintain
+	//			backwards compatibility with RFC 2338.  It SHOULD be set to zero on
+	//	   		transmission and ignored on reception.
+	return nil
+}
+
+// CanDecode returns the layer type this decoder is able to decode.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we return gopacket.LayerTypeZero
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns nil; the VRRP packet does not carry any payload data.
+func (v *VRRPv2) Payload() []byte {
+	return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		return errors.New("Not a valid VRRP packet. Packet length is too small.")
+	}
+	v := &VRRPv2{}
+	return decodingLayerDecoder(v, data, p)
+}
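+
+// A hedged sketch (not upstream code): raw is assumed to be the VRRP payload
+// of an IPv4 packet addressed to 224.0.0.18 with protocol number 112.
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeVRRP, gopacket.Default)
+//	if l := packet.Layer(LayerTypeVRRP); l != nil {
+//		v := l.(*VRRPv2)
+//		fmt.Printf("VRID %d priority %d addrs %v\n", v.VirtualRtrID, v.Priority, v.IPAddress)
+//	}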
diff --git a/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/google/gopacket/layers/vxlan.go
new file mode 100644
index 0000000..4f79ea4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vxlan.go
@@ -0,0 +1,98 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+//  VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+//  G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//  0             8               16              24              32
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|       Group Policy ID         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |     24 bit VXLAN Network Identifier           |   Reserved    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header
+type VXLAN struct {
+	BaseLayer
+	ValidIDFlag      bool   // 'I' bit per RFC 7348
+	VNI              uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+	GBPExtension     bool   // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	GBPDontLearn     bool   // 'D' bit per Group Policy
+	GBPApplied       bool   // 'A' bit per Group Policy
+	GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+	vx := &VXLAN{}
+
+	// VNI is a 24bit number, Uint32 requires 32 bits
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+
+	// RFC 7348 https://tools.ietf.org/html/rfc7348
+	vx.ValidIDFlag = data[0]&0x08 > 0        // 'I' bit per RFC7348
+	vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+	// Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	vx.GBPExtension = data[0]&0x80 > 0                       // 'G' bit per the group policy draft
+	vx.GBPDontLearn = data[1]&0x40 > 0                       // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+	vx.GBPApplied = data[1]&0x80 > 0                         // 'A' bit - indicates that the group policy has already been applied to this packet.
+	vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+	// Layer information
+	const vxlanLength = 8
+	vx.Contents = data[:vxlanLength]
+	vx.Payload = data[vxlanLength:]
+
+	p.AddLayer(vx)
+	return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+
+	// PrependBytes does not guarantee that bytes are zeroed.  Setting flags via OR requires that they start off at zero
+	bytes[0] = 0
+	bytes[1] = 0
+
+	if vx.ValidIDFlag {
+		bytes[0] |= 0x08
+	}
+	if vx.GBPExtension {
+		bytes[0] |= 0x80
+	}
+	if vx.GBPDontLearn {
+		bytes[1] |= 0x40
+	}
+	if vx.GBPApplied {
+		bytes[1] |= 0x80
+	}
+
+	binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+	if vx.VNI >= 1<<24 {
+		return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+	}
+	binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+	return nil
+}
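+
+// A minimal sketch (not upstream code): building the 8-byte header for VNI 42.
+// SerializeTo above rejects any VNI that does not fit in 24 bits.
+//
+//	vx := &VXLAN{ValidIDFlag: true, VNI: 42}
+//	buf := gopacket.NewSerializeBuffer()
+//	_ = vx.SerializeTo(buf, gopacket.SerializeOptions{})
+//	// buf.Bytes() is now 08 00 00 00 00 00 2a 00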
diff --git a/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/google/gopacket/layertype.go
new file mode 100644
index 0000000..3abfee1
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layertype.go
@@ -0,0 +1,111 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// LayerType is a unique identifier for each type of layer.  This enumeration
+// does not match with any externally available numbering scheme... it's solely
+// usable/useful within this library as a means for requesting layer types
+// (see Packet.Layer) and determining which types of layers have been decoded.
+//
+// New LayerTypes may be created by calling gopacket.RegisterLayerType.
+type LayerType int64
+
+// LayerTypeMetadata contains metadata associated with each LayerType.
+type LayerTypeMetadata struct {
+	// Name is the string returned by each layer type's String method.
+	Name string
+	// Decoder is the decoder to use when the layer type is passed in as a
+	// Decoder.
+	Decoder Decoder
+}
+
+type layerTypeMetadata struct {
+	inUse bool
+	LayerTypeMetadata
+}
+
+// DecodersByLayerName maps layer names to decoders for those layers.
+// This allows users to specify decoders by name to a program and have that
+// program pick the correct decoder accordingly.
+var DecodersByLayerName = map[string]Decoder{}
+
+const maxLayerType = 2000
+
+var ltMeta [maxLayerType]layerTypeMetadata
+var ltMetaMap = map[LayerType]layerTypeMetadata{}
+
+// RegisterLayerType creates a new layer type and registers it globally.
+// The number passed in must be unique, or a runtime panic will occur.  Numbers
+// 0-999 are reserved for the gopacket library.  Numbers 1000-1999 should be
+// used for common application-specific types, and are very fast.  Any other
+// number (negative or >= 2000) may be used for uncommon application-specific
+// types; these are somewhat slower (they require a map lookup instead of an
+// array index).
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		if ltMeta[num].inUse {
+			panic("Layer type already exists")
+		}
+	} else {
+		if ltMetaMap[LayerType(num)].inUse {
+			panic("Layer type already exists")
+		}
+	}
+	return OverrideLayerType(num, meta)
+}
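+
+// A hedged sketch (not upstream code): registering an application-specific
+// layer type in the fast 1000-1999 range. decodeMyProtocol is a placeholder
+// func([]byte, PacketBuilder) error supplied by the caller.
+//
+//	var LayerTypeMyProtocol = RegisterLayerType(1000, LayerTypeMetadata{
+//		Name:    "MyProtocol",
+//		Decoder: DecodeFunc(decodeMyProtocol),
+//	})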
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		ltMeta[num] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	} else {
+		ltMetaMap[LayerType(num)] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	}
+	DecodersByLayerName[meta.Name] = meta.Decoder
+	return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+	var d Decoder
+	if 0 <= int(t) && int(t) < maxLayerType {
+		d = ltMeta[int(t)].Decoder
+	} else {
+		d = ltMetaMap[t].Decoder
+	}
+	if d != nil {
+		return d.Decode(data, c)
+	}
+	return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the string associated with this layer type.
+func (t LayerType) String() (s string) {
+	if 0 <= int(t) && int(t) < maxLayerType {
+		s = ltMeta[int(t)].Name
+	} else {
+		s = ltMetaMap[t].Name
+	}
+	if s == "" {
+		s = strconv.Itoa(int(t))
+	}
+	return
+}
diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go
new file mode 100644
index 0000000..3a7c4b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/packet.go
@@ -0,0 +1,864 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"syscall"
+	"time"
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+	// Timestamp is the time the packet was captured, if that is known.
+	Timestamp time.Time
+	// CaptureLength is the total number of bytes read off of the wire.
+	CaptureLength int
+	// Length is the size of the original packet.  Should always be >=
+	// CaptureLength.
+	Length int
+	// InterfaceIndex
+	InterfaceIndex int
+	// The packet source can place ancillary data of various types here.
+	// For example, the afpacket source can report the VLAN of captured
+	// packets this way.
+	AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
+type PacketMetadata struct {
+	CaptureInfo
+	// Truncated is true if packet decoding logic detects that there are fewer
+	// bytes in the packet than are detailed in various headers (for example, if
+	// the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
+	// This is also set automatically for packets captured off the wire if
+	// CaptureInfo.CaptureLength < CaptureInfo.Length.
+	Truncated bool
+}
+
+// Packet is the primary object used by gopacket.  Packets are created by a
+// Decoder's Decode call.  A packet is made up of a set of Data, which
+// is broken into a number of Layers as it is decoded.
+type Packet interface {
+	//// Functions for outputting the packet as a human-readable string:
+	//// ------------------------------------------------------------------
+	// String returns a human-readable string representation of the packet.
+	// It uses LayerString on each layer to output the layer.
+	String() string
+	// Dump returns a verbose human-readable string representation of the packet,
+	// including a hex dump of all layers.  It uses LayerDump on each layer to
+	// output the layer.
+	Dump() string
+
+	//// Functions for accessing arbitrary packet layers:
+	//// ------------------------------------------------------------------
+	// Layers returns all layers in this packet, computing them as necessary
+	Layers() []Layer
+	// Layer returns the first layer in this packet of the given type, or nil
+	Layer(LayerType) Layer
+	// LayerClass returns the first layer in this packet of the given class,
+	// or nil.
+	LayerClass(LayerClass) Layer
+
+	//// Functions for accessing specific types of packet layers.  These functions
+	//// return the first layer of each type found within the packet.
+	//// ------------------------------------------------------------------
+	// LinkLayer returns the first link layer in the packet
+	LinkLayer() LinkLayer
+	// NetworkLayer returns the first network layer in the packet
+	NetworkLayer() NetworkLayer
+	// TransportLayer returns the first transport layer in the packet
+	TransportLayer() TransportLayer
+	// ApplicationLayer returns the first application layer in the packet
+	ApplicationLayer() ApplicationLayer
+	// ErrorLayer is particularly useful, since it returns nil if the packet
+	// was fully decoded successfully, and non-nil if an error was encountered
+	// in decoding and the packet was only partially decoded.  Thus, its output
+	// can be used to determine if the entire packet was able to be decoded.
+	ErrorLayer() ErrorLayer
+
+	//// Functions for accessing data specific to the packet:
+	//// ------------------------------------------------------------------
+	// Data returns the set of bytes that make up this entire packet.
+	Data() []byte
+	// Metadata returns packet metadata associated with this packet.
+	Metadata() *PacketMetadata
+}
+
+// packet contains all the information we need to fulfill the Packet interface,
+// and its two "subclasses" (yes, no such thing in Go, bear with me),
+// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
+// various functions needed to access this information.
+type packet struct {
+	// data contains the entire packet data for a packet
+	data []byte
+	// initialLayers is space for an initial set of layers already created inside
+	// the packet.
+	initialLayers [6]Layer
+	// layers contains each layer we've already decoded
+	layers []Layer
+	// last is the last layer added to the packet
+	last Layer
+	// metadata is the PacketMetadata for this packet
+	metadata PacketMetadata
+
+	decodeOptions DecodeOptions
+
+	// Pointers to the various important layers
+	link        LinkLayer
+	network     NetworkLayer
+	transport   TransportLayer
+	application ApplicationLayer
+	failure     ErrorLayer
+}
+
+func (p *packet) SetTruncated() {
+	p.metadata.Truncated = true
+}
+
+func (p *packet) SetLinkLayer(l LinkLayer) {
+	if p.link == nil {
+		p.link = l
+	}
+}
+
+func (p *packet) SetNetworkLayer(l NetworkLayer) {
+	if p.network == nil {
+		p.network = l
+	}
+}
+
+func (p *packet) SetTransportLayer(l TransportLayer) {
+	if p.transport == nil {
+		p.transport = l
+	}
+}
+
+func (p *packet) SetApplicationLayer(l ApplicationLayer) {
+	if p.application == nil {
+		p.application = l
+	}
+}
+
+func (p *packet) SetErrorLayer(l ErrorLayer) {
+	if p.failure == nil {
+		p.failure = l
+	}
+}
+
+func (p *packet) AddLayer(l Layer) {
+	p.layers = append(p.layers, l)
+	p.last = l
+}
+
+func (p *packet) DumpPacketData() {
+	fmt.Fprint(os.Stderr, p.packetDump())
+	os.Stderr.Sync()
+}
+
+func (p *packet) Metadata() *PacketMetadata {
+	return &p.metadata
+}
+
+func (p *packet) Data() []byte {
+	return p.data
+}
+
+func (p *packet) DecodeOptions() *DecodeOptions {
+	return &p.decodeOptions
+}
+
+func (p *packet) addFinalDecodeError(err error, stack []byte) {
+	fail := &DecodeFailure{err: err, stack: stack}
+	if p.last == nil {
+		fail.data = p.data
+	} else {
+		fail.data = p.last.LayerPayload()
+	}
+	p.AddLayer(fail)
+	p.SetErrorLayer(fail)
+}
+
+func (p *packet) recoverDecodeError() {
+	if !p.decodeOptions.SkipDecodeRecovery {
+		if r := recover(); r != nil {
+			p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
+		}
+	}
+}
+
+// LayerString outputs an individual layer as a string.  The layer is output
+// in a single line, with no trailing newline.  This function is specifically
+// designed to do the right thing for most layers... it follows the following
+// rules:
+//  * If the Layer has a String function, just output that.
+//  * Otherwise, output all exported fields in the layer, recursing into
+//    exported slices and structs.
+// NOTE:  This is NOT THE SAME AS fmt's "%#v".  %#v will output both exported
+// and unexported fields... many times packet layers contain unexported stuff
+// that would just mess up the output of the layer, see for example the
+// Payload layer and its internal 'data' field, which contains a large byte
+// array that would really mess up formatting.
+func LayerString(l Layer) string {
+	return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
+}
+
+// Dumper dumps verbose information on a value.  If a layer type implements
+// Dumper, then its LayerDump() string will include the results in its output.
+type Dumper interface {
+	Dump() string
+}
+
+// LayerDump outputs a very verbose string representation of a layer.  Its
+// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
+// It contains newlines and ends with a newline.
+func LayerDump(l Layer) string {
+	var b bytes.Buffer
+	b.WriteString(LayerString(l))
+	b.WriteByte('\n')
+	if d, ok := l.(Dumper); ok {
+		dump := d.Dump()
+		if dump != "" {
+			b.WriteString(dump)
+			if dump[len(dump)-1] != '\n' {
+				b.WriteByte('\n')
+			}
+		}
+	}
+	b.WriteString(hex.Dump(l.LayerContents()))
+	return b.String()
+}
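+
+// A minimal sketch (not upstream code): given any decoded layer l,
+// LayerString yields the single-line form and LayerDump adds a hex dump.
+//
+//	fmt.Println(LayerString(l)) // e.g. "UDP\t{Contents=[..8..] ...}"
+//	fmt.Print(LayerDump(l))     // the line above plus hex.Dump of the contents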
+
+// layerString outputs, recursively, a layer in a "smart" way.  See docs for
+// LayerString for more details.
+//
+// Params:
+//   i - value to write out
+//   anonymous:  if we're currently recursing an anonymous member of a struct
+//   writeSpace:  if we've already written a value in a struct, and need to
+//     write a space before writing more.  This happens when we write various
+//     anonymous values, and need to keep writing more.
+func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
+	// Let String() functions take precedence.
+	if v.CanInterface() {
+		if s, ok := v.Interface().(fmt.Stringer); ok {
+			return s.String()
+		}
+	}
+	// Reflect, and spit out all the exported fields as key=value.
+	switch v.Type().Kind() {
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return "nil"
+		}
+		r := v.Elem()
+		return layerString(r, anonymous, writeSpace)
+	case reflect.Struct:
+		var b bytes.Buffer
+		typ := v.Type()
+		if !anonymous {
+			b.WriteByte('{')
+		}
+		for i := 0; i < v.NumField(); i++ {
+			// Check if this is upper-case.
+			ftype := typ.Field(i)
+			f := v.Field(i)
+			if ftype.Anonymous {
+				anonStr := layerString(f, true, writeSpace)
+				writeSpace = writeSpace || anonStr != ""
+				b.WriteString(anonStr)
+			} else if ftype.PkgPath == "" { // exported
+				if writeSpace {
+					b.WriteByte(' ')
+				}
+				writeSpace = true
+				fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
+			}
+		}
+		if !anonymous {
+			b.WriteByte('}')
+		}
+		return b.String()
+	case reflect.Slice:
+		var b bytes.Buffer
+		b.WriteByte('[')
+		if v.Len() > 4 {
+			fmt.Fprintf(&b, "..%d..", v.Len())
+		} else {
+			for j := 0; j < v.Len(); j++ {
+				if j != 0 {
+					b.WriteString(", ")
+				}
+				b.WriteString(layerString(v.Index(j), false, false))
+			}
+		}
+		b.WriteByte(']')
+		return b.String()
+	}
+	return fmt.Sprintf("%v", v.Interface())
+}
+
+const (
+	longBytesLength = 128
+)
+
+// LongBytesGoString returns a string representation of the byte slice shortened
+// using the format '<type>{<truncated slice> ... (<n> bytes)}' if it
+// exceeds a predetermined length. Can be used to avoid filling the display with
+// very long byte strings.
+func LongBytesGoString(buf []byte) string {
+	if len(buf) < longBytesLength {
+		return fmt.Sprintf("%#v", buf)
+	}
+	s := fmt.Sprintf("%#v", buf[:longBytesLength-1])
+	s = strings.TrimSuffix(s, "}")
+	return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf))
+}
+
+func baseLayerString(value reflect.Value) string {
+	t := value.Type()
+	content := value.Field(0)
+	c := make([]byte, content.Len())
+	for i := range c {
+		c[i] = byte(content.Index(i).Uint())
+	}
+	payload := value.Field(1)
+	p := make([]byte, payload.Len())
+	for i := range p {
+		p[i] = byte(payload.Index(i).Uint())
+	}
+	return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
+		LongBytesGoString(c),
+		LongBytesGoString(p))
+}
+
+func layerGoString(i interface{}, b *bytes.Buffer) {
+	if s, ok := i.(fmt.GoStringer); ok {
+		b.WriteString(s.GoString())
+		return
+	}
+
+	var v reflect.Value
+	var ok bool
+	if v, ok = i.(reflect.Value); !ok {
+		v = reflect.ValueOf(i)
+	}
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		if v.Kind() == reflect.Ptr {
+			b.WriteByte('&')
+		}
+		layerGoString(v.Elem().Interface(), b)
+	case reflect.Struct:
+		t := v.Type()
+		b.WriteString(t.String())
+		b.WriteByte('{')
+		for i := 0; i < v.NumField(); i++ {
+			if i > 0 {
+				b.WriteString(", ")
+			}
+			if t.Field(i).Name == "BaseLayer" {
+				fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
+			} else if v.Field(i).Kind() == reflect.Struct {
+				fmt.Fprintf(b, "%s:", t.Field(i).Name)
+				layerGoString(v.Field(i), b)
+			} else if v.Field(i).Kind() == reflect.Ptr {
+				b.WriteByte('&')
+				layerGoString(v.Field(i), b)
+			} else {
+				fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
+			}
+		}
+		b.WriteByte('}')
+	default:
+		fmt.Fprintf(b, "%#v", i)
+	}
+}
+
+// LayerGoString returns a representation of the layer in Go syntax,
+// taking care to shorten "very long" BaseLayer byte slices
+func LayerGoString(l Layer) string {
+	b := new(bytes.Buffer)
+	layerGoString(l, b)
+	return b.String()
+}
+
+func (p *packet) packetString() string {
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data()))
+	if p.metadata.Truncated {
+		b.WriteString(", truncated")
+	}
+	if p.metadata.Length > 0 {
+		fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
+	}
+	if !p.metadata.Timestamp.IsZero() {
+		fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp)
+	}
+	b.WriteByte('\n')
+	for i, l := range p.layers {
+		fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l))
+	}
+	return b.String()
+}
+
+func (p *packet) packetDump() string {
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
+	for i, l := range p.layers {
+		fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l))
+	}
+	return b.String()
+}
+
+// eagerPacket is a packet implementation that does eager decoding.  Upon
+// initial construction, it decodes all the layers it can from packet data.
+// eagerPacket implements Packet and PacketBuilder.
+type eagerPacket struct {
+	packet
+}
+
+var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")
+
+func (p *eagerPacket) NextDecoder(next Decoder) error {
+	if next == nil {
+		return errNilDecoder
+	}
+	if p.last == nil {
+		return errors.New("NextDecoder called, but no layers added yet")
+	}
+	d := p.last.LayerPayload()
+	if len(d) == 0 {
+		return nil
+	}
+	// Since we're eager, immediately call the next decoder.
+	return next.Decode(d, p)
+}
+func (p *eagerPacket) initialDecode(dec Decoder) {
+	defer p.recoverDecodeError()
+	err := dec.Decode(p.data, p)
+	if err != nil {
+		p.addFinalDecodeError(err, nil)
+	}
+}
+func (p *eagerPacket) LinkLayer() LinkLayer {
+	return p.link
+}
+func (p *eagerPacket) NetworkLayer() NetworkLayer {
+	return p.network
+}
+func (p *eagerPacket) TransportLayer() TransportLayer {
+	return p.transport
+}
+func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
+	return p.application
+}
+func (p *eagerPacket) ErrorLayer() ErrorLayer {
+	return p.failure
+}
+func (p *eagerPacket) Layers() []Layer {
+	return p.layers
+}
+func (p *eagerPacket) Layer(t LayerType) Layer {
+	for _, l := range p.layers {
+		if l.LayerType() == t {
+			return l
+		}
+	}
+	return nil
+}
+func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
+	for _, l := range p.layers {
+		if lc.Contains(l.LayerType()) {
+			return l
+		}
+	}
+	return nil
+}
+func (p *eagerPacket) String() string { return p.packetString() }
+func (p *eagerPacket) Dump() string   { return p.packetDump() }
+
+// lazyPacket does lazy decoding on its packet data.  On construction it does
+// no initial decoding.  For each function call, it decodes only as many layers
+// as are necessary to compute the return value for that function.
+// lazyPacket implements Packet and PacketBuilder.
+type lazyPacket struct {
+	packet
+	next Decoder
+}
+
+func (p *lazyPacket) NextDecoder(next Decoder) error {
+	if next == nil {
+		return errNilDecoder
+	}
+	p.next = next
+	return nil
+}
+func (p *lazyPacket) decodeNextLayer() {
+	if p.next == nil {
+		return
+	}
+	d := p.data
+	if p.last != nil {
+		d = p.last.LayerPayload()
+	}
+	next := p.next
+	p.next = nil
+	// We've just set p.next to nil, so if we see we have no data, this should be
+	// the final call we get to decodeNextLayer if we return here.
+	if len(d) == 0 {
+		return
+	}
+	defer p.recoverDecodeError()
+	err := next.Decode(d, p)
+	if err != nil {
+		p.addFinalDecodeError(err, nil)
+	}
+}
+func (p *lazyPacket) LinkLayer() LinkLayer {
+	for p.link == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.link
+}
+func (p *lazyPacket) NetworkLayer() NetworkLayer {
+	for p.network == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.network
+}
+func (p *lazyPacket) TransportLayer() TransportLayer {
+	for p.transport == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.transport
+}
+func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
+	for p.application == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.application
+}
+func (p *lazyPacket) ErrorLayer() ErrorLayer {
+	for p.failure == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.failure
+}
+func (p *lazyPacket) Layers() []Layer {
+	for p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.layers
+}
+func (p *lazyPacket) Layer(t LayerType) Layer {
+	for _, l := range p.layers {
+		if l.LayerType() == t {
+			return l
+		}
+	}
+	numLayers := len(p.layers)
+	for p.next != nil {
+		p.decodeNextLayer()
+		for _, l := range p.layers[numLayers:] {
+			if l.LayerType() == t {
+				return l
+			}
+		}
+		numLayers = len(p.layers)
+	}
+	return nil
+}
+func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
+	for _, l := range p.layers {
+		if lc.Contains(l.LayerType()) {
+			return l
+		}
+	}
+	numLayers := len(p.layers)
+	for p.next != nil {
+		p.decodeNextLayer()
+		for _, l := range p.layers[numLayers:] {
+			if lc.Contains(l.LayerType()) {
+				return l
+			}
+		}
+		numLayers = len(p.layers)
+	}
+	return nil
+}
+func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
+func (p *lazyPacket) Dump() string   { p.Layers(); return p.packetDump() }
+
+// DecodeOptions tells gopacket how to decode a packet.
+type DecodeOptions struct {
+	// Lazy decoding decodes the minimum number of layers needed to return data
+	// for a packet at each function call.  Be careful using this with concurrent
+	// packet processors, as each call to packet.* could mutate the packet, and
+	// two concurrent function calls could interact poorly.
+	Lazy bool
+	// NoCopy decoding doesn't copy its input buffer into storage that's owned by
+	// the packet.  If you can guarantee that the bytes underlying the slice
+	// passed into NewPacket aren't going to be modified, this can be faster.  If
+	// there's any chance that those bytes WILL be changed, this will invalidate
+	// your packets.
+	NoCopy bool
+	// SkipDecodeRecovery skips over panic recovery during packet decoding.
+	// Normally, when packets decode, if a panic occurs, that panic is captured
+	// by a recover(), and a DecodeFailure layer is added to the packet detailing
+	// the issue.  If this flag is set, panics are instead allowed to continue up
+	// the stack.
+	SkipDecodeRecovery bool
+	// DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
+	// decoder. If true, we should try to decode layers after TCP in single packets.
+	// This is disabled by default because the reassembly package drives the decoding
+	// of TCP payload data after reassembly.
+	DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets.  It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified).  Both of these take time,
+// though, so beware.  If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding.  If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes.  The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet {
+	if !options.NoCopy {
+		dataCopy := make([]byte, len(data))
+		copy(dataCopy, data)
+		data = dataCopy
+	}
+	if options.Lazy {
+		p := &lazyPacket{
+			packet: packet{data: data, decodeOptions: options},
+			next:   firstLayerDecoder,
+		}
+		p.layers = p.initialLayers[:0]
+		// Crazy craziness:
+		// If the following return statement is REMOVED, and Lazy is FALSE, then
+		// eager packet processing becomes 17% FASTER.  No, there is no logical
+		// explanation for this.  However, it's such a hacky micro-optimization that
+		// we really can't rely on it.  It appears to have to do with the size the
+		// compiler guesses for this function's stack space, since one symptom is
+		// that with the return statement in place, we more than double calls to
+		// runtime.morestack/runtime.lessstack.  We'll hope the compiler gets better
+		// over time and we get this optimization for free.  Until then, we'll have
+		// to live with slower packet processing.
+		return p
+	}
+	p := &eagerPacket{
+		packet: packet{data: data, decodeOptions: options},
+	}
+	p.layers = p.initialLayers[:0]
+	p.initialDecode(firstLayerDecoder)
+	return p
+}
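+
+// A minimal usage sketch (not upstream code): data is assumed to be a raw
+// Ethernet frame, and layers.LayerTypeEthernet (from the layers subpackage)
+// is used as the first decoder.
+//
+//	packet := NewPacket(data, layers.LayerTypeEthernet, Lazy)
+//	if l := packet.Layer(layers.LayerTypeTCP); l != nil {
+//		// with Lazy, only the layers needed to reach TCP were decoded
+//	}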
+
+// PacketDataSource is an interface for some source of packet data.  Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+	// ReadPacketData returns the next packet available from this data source.
+	// It returns:
+	//  data:  The bytes of an individual packet.
+	//  ci:  Metadata about the capture
+	//  err:  An error encountered while reading packet data.  If err != nil,
+	//    then data/ci will be ignored.
+	ReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
+// of internal PacketDataSources, each of which will stop with io.EOF after
+// reading a finite number of packets.  The returned PacketDataSource will
+// return all packets from the first finite source, followed by all packets from
+// the second, etc.  Once all finite sources have returned io.EOF, the returned
+// source will as well.
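+//
+// A minimal illustrative sketch (assumes gopacket/pcap and hypothetical file
+// names):
+//  h1, _ := pcap.OpenOffline("first.pcap")
+//  h2, _ := pcap.OpenOffline("second.pcap")
+//  source := gopacket.ConcatFinitePacketDataSources(h1, h2)
+//  for {
+//    data, ci, err := source.ReadPacketData()
+//    if err == io.EOF {
+//      break
+//    }
+//    _, _ = data, ci  // process the packet here
+//  }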
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
+	c := concat(pds)
+	return &c
+}
+
+type concat []PacketDataSource
+
+func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
+	for len(*c) > 0 {
+		data, ci, err = (*c)[0].ReadPacketData()
+		if err == io.EOF {
+			*c = (*c)[1:]
+			continue
+		}
+		return
+	}
+	return nil, CaptureInfo{}, io.EOF
+}
+
+// ZeroCopyPacketDataSource is an interface to pull packet data from sources
+// that allow data to be returned without copying to a user-controlled buffer.
+// It's very similar to PacketDataSource, except that the caller must be more
+// careful in how the returned buffer is handled.
+type ZeroCopyPacketDataSource interface {
+	// ZeroCopyReadPacketData returns the next packet available from this data source.
+	// It returns:
+	//  data:  The bytes of an individual packet.  Unlike with
+	//    PacketDataSource's ReadPacketData, the slice returned here points
+	//    to a buffer owned by the data source.  In particular, the bytes in
+	//    this buffer may be changed by future calls to
+	//    ZeroCopyReadPacketData.  Do not use the returned buffer after
+	//    subsequent ZeroCopyReadPacketData calls.
+	//  ci:  Metadata about the capture.
+	//  err:  An error encountered while reading packet data.  If err != nil,
+	//    then data/ci will be ignored.
+	ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// PacketSource reads in packets from a PacketDataSource, decodes them, and
+// returns them.
+//
+// There are currently two different methods for reading packets in through
+// a PacketSource:
+//
+// Reading With Packets Function
+//
+// This method is the most convenient and easiest to code, but lacks
+// flexibility.  Packets returns a 'chan Packet', then asynchronously writes
+// packets into that channel.  Packets uses a blocking channel, and closes
+// it if an io.EOF is returned by the underlying PacketDataSource.  All other
+// PacketDataSource errors are ignored and discarded.
+//  for packet := range packetSource.Packets() {
+//    ...
+//  }
+//
+// Reading With NextPacket Function
+//
+// This method is the most flexible, and exposes errors that may be
+// encountered by the underlying PacketDataSource.  It's also the fastest
+// in a tight loop, since it doesn't have the overhead of a channel
+// read/write.  However, it requires the user to handle errors, most
+// importantly the io.EOF error in cases where packets are being read from
+// a file.
+//  for {
+//    packet, err := packetSource.NextPacket()
+//    if err == io.EOF {
+//      break
+//    } else if err != nil {
+//      log.Println("Error:", err)
+//      continue
+//    }
+//    handlePacket(packet)  // Do something with each packet.
+//  }
+type PacketSource struct {
+	source  PacketDataSource
+	decoder Decoder
+	// DecodeOptions is the set of options to use for decoding each piece
+	// of packet data.  This can/should be changed by the user to reflect the
+	// way packets should be decoded.
+	DecodeOptions
+	c chan Packet
+}
+
+// NewPacketSource creates a packet data source.
+func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource {
+	return &PacketSource{
+		source:  source,
+		decoder: decoder,
+	}
+}
+
+// NextPacket returns the next decoded packet from the PacketSource.  On error,
+// it returns a nil packet and a non-nil error.
+func (p *PacketSource) NextPacket() (Packet, error) {
+	data, ci, err := p.source.ReadPacketData()
+	if err != nil {
+		return nil, err
+	}
+	packet := NewPacket(data, p.decoder, p.DecodeOptions)
+	m := packet.Metadata()
+	m.CaptureInfo = ci
+	m.Truncated = m.Truncated || ci.CaptureLength < ci.Length
+	return packet, nil
+}
+
+// packetsToChannel reads in all packets from the packet source and sends them
+// to the given channel. This routine terminates when NextPacket() returns an
+// unrecoverable error such as io.EOF; temporary errors are retried.
+func (p *PacketSource) packetsToChannel() {
+	defer close(p.c)
+	for {
+		packet, err := p.NextPacket()
+		if err == nil {
+			p.c <- packet
+			continue
+		}
+
+		// Immediately retry for temporary network errors
+		if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
+			continue
+		}
+
+		// Immediately retry for EAGAIN
+		if err == syscall.EAGAIN {
+			continue
+		}
+
+		// Immediately break for known unrecoverable errors
+		if err == io.EOF || err == io.ErrUnexpectedEOF ||
+			err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer ||
+			err == syscall.EBADF ||
+			strings.Contains(err.Error(), "use of closed file") {
+			break
+		}
+
+		// Sleep briefly and try again
+		time.Sleep(time.Millisecond * time.Duration(5))
+	}
+}
+
+// Packets returns a channel of packets, allowing easy iterating over
+// packets.  Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel.  If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+//
+//  for packet := range packetSource.Packets() {
+//    handlePacket(packet)  // Do something with each packet.
+//  }
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) Packets() chan Packet {
+	if p.c == nil {
+		p.c = make(chan Packet, 1000)
+		go p.packetsToChannel()
+	}
+	return p.c
+}
diff --git a/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/google/gopacket/parser.go
new file mode 100644
index 0000000..e5dc0e4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/parser.go
@@ -0,0 +1,207 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of DecodingLayer is that they decode themselves in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in.  A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
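+//
+// A minimal illustrative skeleton of a custom DecodingLayer (MyLayer and
+// MyLayerType are hypothetical names, not part of gopacket):
+//  type MyLayer struct {
+//    payload []byte
+//  }
+//  func (m *MyLayer) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+//    m.payload = data
+//    return nil
+//  }
+//  func (m *MyLayer) CanDecode() gopacket.LayerClass { return MyLayerType }
+//  func (m *MyLayer) NextLayerType() gopacket.LayerType { return gopacket.LayerTypeZero } // no further layers
+//  func (m *MyLayer) LayerPayload() []byte { return nil }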
+type DecodingLayer interface {
+	// DecodeFromBytes resets the internal state of this layer to the state
+	// defined by the passed-in bytes.  Slices in the DecodingLayer may
+	// reference the passed-in data, so care should be taken to copy it
+	// first should later modification of data be required before the
+	// DecodingLayer is discarded.
+	DecodeFromBytes(data []byte, df DecodeFeedback) error
+	// CanDecode returns the set of LayerTypes this DecodingLayer can
+	// decode.  For Layers that are also DecodingLayers, this will most
+	// often be that Layer's LayerType().
+	CanDecode() LayerClass
+	// NextLayerType returns the LayerType which should be used to decode
+	// the LayerPayload.
+	NextLayerType() LayerType
+	// LayerPayload is the set of bytes remaining to decode after a call to
+	// DecodeFromBytes.
+	LayerPayload() []byte
+}
+
+// DecodingLayerParser parses a given set of layer types.  See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+	// DecodingLayerParserOptions is the set of options available to the
+	// user to define the parser's behavior.
+	DecodingLayerParserOptions
+	first    LayerType
+	decoders map[LayerType]DecodingLayer
+	df       DecodeFeedback
+	// Truncated is set when a decode layer detects that the packet has been
+	// truncated.
+	Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser.  This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+	for _, typ := range d.CanDecode().LayerTypes() {
+		l.decoders[typ] = d
+	}
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser.  Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+	l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder.  Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+	dlp := &DecodingLayerParser{
+		decoders: make(map[LayerType]DecodingLayer),
+		first:    first,
+	}
+	dlp.df = dlp // Cast this once to the interface
+	for _, d := range decoders {
+		dlp.AddDecodingLayer(d)
+	}
+	return dlp
+}
+
+// DecodeLayers decodes as many layers as possible from the given data.  It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice.  DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated.  This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+//    func main() {
+//      var eth layers.Ethernet
+//      var ip4 layers.IPv4
+//      var ip6 layers.IPv6
+//      var tcp layers.TCP
+//      var udp layers.UDP
+//      var payload gopacket.Payload
+//      parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
+//      var source gopacket.PacketDataSource = getMyDataSource()
+//      decodedLayers := make([]gopacket.LayerType, 0, 10)
+//      for {
+//        data, _, err := source.ReadPacketData()
+//        if err != nil {
+//          fmt.Println("Error reading packet data: ", err)
+//          continue
+//        }
+//        fmt.Println("Decoding packet")
+//        err = parser.DecodeLayers(data, &decodedLayers)
+//        for _, typ := range decodedLayers {
+//          fmt.Println("  Successfully decoded layer type", typ)
+//          switch typ {
+//            case layers.LayerTypeEthernet:
+//              fmt.Println("    Eth ", eth.SrcMAC, eth.DstMAC)
+//            case layers.LayerTypeIPv4:
+//              fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+//            case layers.LayerTypeIPv6:
+//              fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+//            case layers.LayerTypeTCP:
+//              fmt.Println("    TCP ", tcp.SrcPort, tcp.DstPort)
+//            case layers.LayerTypeUDP:
+//              fmt.Println("    UDP ", udp.SrcPort, udp.DstPort)
+//          }
+//        }
+//        if parser.Truncated {
+//          fmt.Println("  Packet has been truncated")
+//        }
+//        if err != nil {
+//          fmt.Println("  Error encountered:", err)
+//        }
+//      }
+//    }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+	l.Truncated = false
+	if !l.IgnorePanic {
+		defer panicToError(&err)
+	}
+	typ := l.first
+	*decoded = (*decoded)[:0] // Truncate the decoded layers slice.
+	for len(data) > 0 {
+		decoder, ok := l.decoders[typ]
+		if !ok {
+			if l.IgnoreUnsupported {
+				return nil
+			}
+			return UnsupportedLayerType(typ)
+		} else if err = decoder.DecodeFromBytes(data, l.df); err != nil {
+			return err
+		}
+		*decoded = append(*decoded, typ)
+		typ = decoder.NextLayerType()
+		data = decoder.LayerPayload()
+	}
+	return nil
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+	return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+	if r := recover(); r != nil {
+		*e = fmt.Errorf("panic: %v", r)
+	}
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+	// IgnorePanic determines whether a DecodingLayerParser should recover
+	// panics on its own (returning them as an error from DecodeLayers) or
+	// should allow them to propagate up the stack.  Recovering panics adds
+	// some latency to decoding layers, but is much safer for callers.
+	// IgnorePanic defaults to false, so if the caller does nothing, decode
+	// panics are returned as errors.
+	IgnorePanic bool
+	// IgnoreUnsupported will stop parsing and return a nil error when it
+	// encounters a layer it doesn't have a parser for, instead of returning an
+	// UnsupportedLayerType error.  If this is true, it's up to the caller to make
+	// sure that all expected layers have been parsed (by checking the decoded
+	// slice).
+	IgnoreUnsupported bool
+}
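+
+// A short illustrative sketch (not part of the original source): because
+// DecodingLayerParserOptions is embedded in DecodingLayerParser, options can
+// be set directly on the parser after construction, e.g. (using the eth/ip4/tcp
+// layer variables from the DecodeLayers example above):
+//  parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &tcp)
+//  parser.IgnoreUnsupported = true // stop silently at layers without a registered decoder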
diff --git a/vendor/github.com/google/gopacket/pcap/defs_windows_386.go b/vendor/github.com/google/gopacket/pcap/defs_windows_386.go
new file mode 100644
index 0000000..774e907
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/defs_windows_386.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// This file contains necessary structs/constants generated from libpcap headers with cgo -godefs
+// generated with: generate_defs.exe
+// DO NOT MODIFY
+
+package pcap
+
+import "syscall"
+
+const errorBufferSize = 0x100
+
+const (
+	pcapErrorNotActivated    = -0x3
+	pcapErrorActivated       = -0x4
+	pcapWarningPromisc       = 0x2
+	pcapErrorNoSuchDevice    = -0x5
+	pcapErrorDenied          = -0x8
+	pcapErrorNotUp           = -0x9
+	pcapError                = -0x1
+	pcapWarning              = 0x1
+	pcapDIN                  = 0x1
+	pcapDOUT                 = 0x2
+	pcapDINOUT               = 0x0
+	pcapNetmaskUnknown       = 0xffffffff
+	pcapTstampPrecisionMicro = 0x0
+	pcapTstampPrecisionNano  = 0x1
+)
+
+type timeval struct {
+	Sec  int32
+	Usec int32
+}
+type pcapPkthdr struct {
+	Ts     timeval
+	Caplen uint32
+	Len    uint32
+}
+type pcapTPtr uintptr
+type pcapBpfInstruction struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+type pcapBpfProgram struct {
+	Len   uint32
+	Insns *pcapBpfInstruction
+}
+type pcapStats struct {
+	Recv   uint32
+	Drop   uint32
+	Ifdrop uint32
+}
+type pcapCint int32
+type pcapIf struct {
+	Next        *pcapIf
+	Name        *int8
+	Description *int8
+	Addresses   *pcapAddr
+	Flags       uint32
+}
+
+type pcapAddr struct {
+	Next      *pcapAddr
+	Addr      *syscall.RawSockaddr
+	Netmask   *syscall.RawSockaddr
+	Broadaddr *syscall.RawSockaddr
+	Dstaddr   *syscall.RawSockaddr
+}
diff --git a/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go b/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go
new file mode 100644
index 0000000..9619215
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go
@@ -0,0 +1,76 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// This file contains necessary structs/constants generated from libpcap headers with cgo -godefs
+// generated with: generate_defs.exe
+// DO NOT MODIFY
+
+package pcap
+
+import "syscall"
+
+const errorBufferSize = 0x100
+
+const (
+	pcapErrorNotActivated    = -0x3
+	pcapErrorActivated       = -0x4
+	pcapWarningPromisc       = 0x2
+	pcapErrorNoSuchDevice    = -0x5
+	pcapErrorDenied          = -0x8
+	pcapErrorNotUp           = -0x9
+	pcapError                = -0x1
+	pcapWarning              = 0x1
+	pcapDIN                  = 0x1
+	pcapDOUT                 = 0x2
+	pcapDINOUT               = 0x0
+	pcapNetmaskUnknown       = 0xffffffff
+	pcapTstampPrecisionMicro = 0x0
+	pcapTstampPrecisionNano  = 0x1
+)
+
+type timeval struct {
+	Sec  int32
+	Usec int32
+}
+type pcapPkthdr struct {
+	Ts     timeval
+	Caplen uint32
+	Len    uint32
+}
+type pcapTPtr uintptr
+type pcapBpfInstruction struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+type pcapBpfProgram struct {
+	Len       uint32
+	Pad_cgo_0 [4]byte
+	Insns     *pcapBpfInstruction
+}
+type pcapStats struct {
+	Recv   uint32
+	Drop   uint32
+	Ifdrop uint32
+}
+type pcapCint int32
+type pcapIf struct {
+	Next        *pcapIf
+	Name        *int8
+	Description *int8
+	Addresses   *pcapAddr
+	Flags       uint32
+	Pad_cgo_0   [4]byte
+}
+
+type pcapAddr struct {
+	Next      *pcapAddr
+	Addr      *syscall.RawSockaddr
+	Netmask   *syscall.RawSockaddr
+	Broadaddr *syscall.RawSockaddr
+	Dstaddr   *syscall.RawSockaddr
+}
diff --git a/vendor/github.com/google/gopacket/pcap/doc.go b/vendor/github.com/google/gopacket/pcap/doc.go
new file mode 100644
index 0000000..38b3141
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/doc.go
@@ -0,0 +1,112 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package pcap allows users of gopacket to read packets off the wire or from
+pcap files.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket, although it can also be used independently
+if you just want to get packet data from the wire.
+
+Depending on the libpcap version, OS support, and file timestamp resolution,
+either nanosecond or microsecond resolution is used for the internal
+timestamps. Returned timestamps are always scaled to nanosecond resolution
+because time.Time is used. libpcap must be at least version 1.5 to support
+nanosecond timestamps. OpenLive supports only microsecond resolution.
+
+Reading PCAP Files
+
+The following code can be used to read in data from a pcap file.
+
+ if handle, err := pcap.OpenOffline("/path/to/my/file"); err != nil {
+   panic(err)
+ } else {
+   packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
+   for packet := range packetSource.Packets() {
+     handlePacket(packet)  // Do something with a packet here.
+   }
+ }
+
+Reading Live Packets
+
+The following code can be used to read in data from a live device, in this case
+"eth0". Be aware, that OpenLive only supports microsecond resolution.
+
+ if handle, err := pcap.OpenLive("eth0", 1600, true, pcap.BlockForever); err != nil {
+   panic(err)
+ } else if err := handle.SetBPFFilter("tcp and port 80"); err != nil {  // optional
+   panic(err)
+ } else {
+   packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
+   for packet := range packetSource.Packets() {
+     handlePacket(packet)  // Do something with a packet here.
+   }
+ }
+
+Inactive Handles
+
+Newer PCAP functionality requires the concept of an 'inactive' PCAP handle.
+Instead of constantly adding new arguments to pcap_open_live, users now call
+pcap_create to create a handle, set it up with a bunch of optional function
+calls, then call pcap_activate to activate it.  This library mirrors that
+mechanism, for those that want to expose/use these new features:
+
+  inactive, err := pcap.NewInactiveHandle(deviceName)
+  if err != nil {
+    log.Fatal(err)
+  }
+  defer inactive.CleanUp()
+
+  // Call various functions on inactive to set it up the way you'd like:
+  if err = inactive.SetTimeout(time.Minute); err != nil {
+    log.Fatal(err)
+  } else if err = inactive.SetTimestampSource("foo"); err != nil {
+    log.Fatal(err)
+  }
+
+  // Finally, create the actual handle by calling Activate:
+  handle, err := inactive.Activate()  // after this, inactive is no longer valid
+  if err != nil {
+    log.Fatal(err)
+  }
+  defer handle.Close()
+
+  // Now use your handle as you see fit.
+
+PCAP Timeouts
+
+pcap.OpenLive and pcap.SetTimeout both take timeouts.
+If you don't care about timeouts, just pass in BlockForever,
+which should do what you expect with minimal fuss.
+
+A timeout of 0 is not recommended.  Some platforms, like Macs
+(http://www.manpages.info/macosx/pcap.3.html) say:
+  The read timeout is used to arrange that the read not necessarily return
+  immediately when a packet is seen, but that it wait for some amount of time
+  to allow more packets to arrive and to read multiple packets from the OS
+  kernel in one operation.
+This means that if you only capture one packet, the kernel might decide to wait
+'timeout' for more packets to batch with it before returning.  A timeout of
+0, then, means 'wait forever for more packets', which is... not good.
+
+To get around this, we've introduced the following behavior:  if a negative
+timeout is passed in, we set the positive timeout in the handle, then loop
+internally in ReadPacketData/ZeroCopyReadPacketData when we see timeout
+errors.
+
+PCAP File Writing
+
+This package does not implement PCAP file writing.  However, gopacket/pcapgo
+does!  Look there if you'd like to write PCAP files.
+
+Note For Windows Users
+
+gopacket can use winpcap or npcap. If both are installed at the same time,
+npcap is preferred. Make sure the right Windows service is loaded (npcap for npcap
+and npf for winpcap).
+*/
+package pcap
diff --git a/vendor/github.com/google/gopacket/pcap/pcap.go b/vendor/github.com/google/gopacket/pcap/pcap.go
new file mode 100644
index 0000000..6a4ef63
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap.go
@@ -0,0 +1,866 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package pcap
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+)
+
+// ErrNotActive is returned if handle is not activated
+const ErrNotActive = pcapErrorNotActivated
+
+// MaxBpfInstructions is the maximum number of BPF instructions supported (BPF_MAXINSNS),
+// taken from Linux kernel: include/uapi/linux/bpf_common.h
+//
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/bpf_common.h
+const MaxBpfInstructions = 4096
+
+// 8 bytes per instruction, max 4096 instructions
+const bpfInstructionBufferSize = 8 * MaxBpfInstructions
+
+// Handle provides a connection to a pcap handle, allowing users to read packets
+// off the wire (ReadPacketData), inject packets onto the wire (WritePacketData),
+// and perform a number of other functions to affect and understand packet output.
+//
+// Handles are already pcap_activate'd
+type Handle struct {
+	// stop is set to a non-zero value by Handle.Close to signal to
+	// getNextBufPtrLocked to stop trying to read packets
+	// This must be the first entry to ensure alignment for sync.atomic
+	stop uint64
+	// cptr is the handle for the actual pcap C object.
+	cptr           pcapTPtr
+	timeout        time.Duration
+	device         string
+	deviceIndex    int
+	mu             sync.Mutex
+	closeMu        sync.Mutex
+	nanoSecsFactor int64
+
+	// Since pointers to these objects are passed into a C function, if
+	// they're declared locally then the Go compiler thinks they may have
+	// escaped into C-land, so it allocates them on the heap.  This causes a
+	// huge memory hit, so to handle that we store them here instead.
+	pkthdr *pcapPkthdr
+	bufptr *uint8
+}
+
+// Stats contains statistics on how many packets were handled by a pcap handle,
+// and what was done with those packets.
+type Stats struct {
+	PacketsReceived  int
+	PacketsDropped   int
+	PacketsIfDropped int
+}
+
+// Interface describes a single network interface on a machine.
+type Interface struct {
+	Name        string
+	Description string
+	Flags       uint32
+	Addresses   []InterfaceAddress
+}
+
+// Datalink describes the datalink
+type Datalink struct {
+	Name        string
+	Description string
+}
+
+// InterfaceAddress describes an address associated with an Interface.
+// Currently, it's IPv4/6 specific.
+type InterfaceAddress struct {
+	IP        net.IP
+	Netmask   net.IPMask // Netmask may be nil if we were unable to retrieve it.
+	Broadaddr net.IP     // Broadcast address for this IP may be nil
+	P2P       net.IP     // P2P destination address for this IP may be nil
+}
+
+// BPF is a compiled filter program, useful for offline packet matching.
+type BPF struct {
+	orig string
+	bpf  pcapBpfProgram // takes a finalizer, not overridden by outsiders
+	hdr  pcapPkthdr     // allocate on the heap to enable optimizations
+}
+
+// BPFInstruction is a byte encoded structure holding a BPF instruction
+type BPFInstruction struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+// BlockForever, when passed into SetTimeout or OpenLive, causes the handle to
+// block forever waiting for packets, while still returning incoming packets to
+// userland relatively quickly.
+const BlockForever = -time.Millisecond * 10
+
+func timeoutMillis(timeout time.Duration) int {
+	// Flip sign if necessary.  See package docs on timeout for reasoning behind this.
+	if timeout < 0 {
+		timeout *= -1
+	}
+	// Round up
+	if timeout != 0 && timeout < time.Millisecond {
+		timeout = time.Millisecond
+	}
+	return int(timeout / time.Millisecond)
+}
+
+// OpenLive opens a device and returns a *Handle.
+// It takes as arguments the name of the device ("eth0"), the maximum size to
+// read for each packet (snaplen), whether to put the interface in promiscuous
+// mode, and a timeout. Warning: this function supports only microsecond timestamps.
+// For nanosecond resolution use an InactiveHandle.
+//
+// See the package documentation for important details regarding 'timeout'.
+func OpenLive(device string, snaplen int32, promisc bool, timeout time.Duration) (handle *Handle, _ error) {
+	var pro int
+	if promisc {
+		pro = 1
+	}
+
+	p, err := pcapOpenLive(device, int(snaplen), pro, timeoutMillis(timeout))
+	if err != nil {
+		return nil, err
+	}
+	p.timeout = timeout
+	p.device = device
+
+	ifc, err := net.InterfaceByName(device)
+	if err != nil {
+		// The device wasn't found in the OS, but could be "any"
+		// Set index to 0
+		p.deviceIndex = 0
+	} else {
+		p.deviceIndex = ifc.Index
+	}
+
+	p.nanoSecsFactor = 1000
+
+	// Only set the PCAP handle into non-blocking mode if we have a timeout
+	// greater than zero. If the user wants to block forever, we'll let libpcap
+	// handle that.
+	if p.timeout > 0 {
+		if err := p.setNonBlocking(); err != nil {
+			p.pcapClose()
+			return nil, err
+		}
+	}
+
+	return p, nil
+}
+
+// OpenOffline opens a file and returns its contents as a *Handle. Depending on libpcap support and
+// on the timestamp resolution used in the file, nanosecond or microsecond resolution is used
+// internally. All returned timestamps are scaled to nanosecond resolution. Resolution() can be used
+// to query the actual resolution used.
+func OpenOffline(file string) (handle *Handle, err error) {
+	handle, err = openOffline(file)
+	if err != nil {
+		return
+	}
+	if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+		handle.nanoSecsFactor = 1
+	} else {
+		handle.nanoSecsFactor = 1000
+	}
+	return
+}
+
+// OpenOfflineFile returns contents of input file as a *Handle. Depending on libpcap support and
+// on the timestamp resolution used in the file, nanosecond or microsecond resolution is used
+// internally. All returned timestamps are scaled to nanosecond resolution. Resolution() can be used
+// to query the actual resolution used.
+func OpenOfflineFile(file *os.File) (handle *Handle, err error) {
+	handle, err = openOfflineFile(file)
+	if err != nil {
+		return
+	}
+	if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+		handle.nanoSecsFactor = 1
+	} else {
+		handle.nanoSecsFactor = 1000
+	}
+	return
+}
+
+// NextError is the return code from a call to Next.
+type NextError int32
+
+// NextError implements the error interface.
+func (n NextError) Error() string {
+	switch n {
+	case NextErrorOk:
+		return "OK"
+	case NextErrorTimeoutExpired:
+		return "Timeout Expired"
+	case NextErrorReadError:
+		return "Read Error"
+	case NextErrorNoMorePackets:
+		return "No More Packets In File"
+	case NextErrorNotActivated:
+		return "Not Activated"
+	}
+	return strconv.Itoa(int(n))
+}
+
+// NextError values.
+const (
+	NextErrorOk             NextError = 1
+	NextErrorTimeoutExpired NextError = 0
+	NextErrorReadError      NextError = -1
+	// NextErrorNoMorePackets is returned when reading from a file (OpenOffline) and
+	// EOF is reached.  When this happens, ReadPacketData() returns io.EOF instead of this.
+	NextErrorNoMorePackets NextError = -2
+	NextErrorNotActivated  NextError = -3
+)
+
+// ReadPacketData returns the next packet read from the pcap handle, along with an error
+// code associated with that packet.  If the packet is read successfully, the
+// returned error is nil.
+func (p *Handle) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
+	p.mu.Lock()
+	err = p.getNextBufPtrLocked(&ci)
+	if err == nil {
+		data = make([]byte, ci.CaptureLength)
+		copy(data, (*(*[1 << 30]byte)(unsafe.Pointer(p.bufptr)))[:])
+	}
+	p.mu.Unlock()
+	if err == NextErrorTimeoutExpired {
+		runtime.Gosched()
+	}
+	return
+}
+
+type activateError int
+
+const (
+	aeNoError      = activateError(0)
+	aeActivated    = activateError(pcapErrorActivated)
+	aePromisc      = activateError(pcapWarningPromisc)
+	aeNoSuchDevice = activateError(pcapErrorNoSuchDevice)
+	aeDenied       = activateError(pcapErrorDenied)
+	aeNotUp        = activateError(pcapErrorNotUp)
+	aeWarning      = activateError(pcapWarning)
+)
+
+func (a activateError) Error() string {
+	switch a {
+	case aeNoError:
+		return "No Error"
+	case aeActivated:
+		return "Already Activated"
+	case aePromisc:
+		return "Cannot set as promisc"
+	case aeNoSuchDevice:
+		return "No Such Device"
+	case aeDenied:
+		return "Permission Denied"
+	case aeNotUp:
+		return "Interface Not Up"
+	case aeWarning:
+		return fmt.Sprintf("Warning: %v", activateErrMsg.Error())
+	default:
+		return fmt.Sprintf("unknown activated error: %d", a)
+	}
+}
+
+// getNextBufPtrLocked is shared code for ReadPacketData and
+// ZeroCopyReadPacketData.
+func (p *Handle) getNextBufPtrLocked(ci *gopacket.CaptureInfo) error {
+	if !p.isOpen() {
+		return io.EOF
+	}
+
+	// set after we have called waitForPacket for the first time
+	var waited bool
+
+	for atomic.LoadUint64(&p.stop) == 0 {
+		// try to read a packet if one is immediately available
+		result := p.pcapNextPacketEx()
+
+		switch result {
+		case NextErrorOk:
+			sec := p.pkthdr.getSec()
+			// convert micros to nanos
+			nanos := int64(p.pkthdr.getUsec()) * p.nanoSecsFactor
+
+			ci.Timestamp = time.Unix(sec, nanos)
+			ci.CaptureLength = p.pkthdr.getCaplen()
+			ci.Length = p.pkthdr.getLen()
+			ci.InterfaceIndex = p.deviceIndex
+
+			return nil
+		case NextErrorNoMorePackets:
+			// no more packets, return EOF rather than libpcap-specific error
+			return io.EOF
+		case NextErrorTimeoutExpired:
+			// we've already waited for a packet and we're supposed to time out
+			//
+			// we should never actually hit this if we were passed BlockForever
+			// since we should block on C.pcap_next_ex until there's a packet
+			// to read.
+			if waited && p.timeout > 0 {
+				return result
+			}
+
+			// wait for packet before trying again
+			p.waitForPacket()
+			waited = true
+		default:
+			return result
+		}
+	}
+
+	// stop must be set
+	return io.EOF
+}
+
+// ZeroCopyReadPacketData reads the next packet off the wire, and returns its data.
+// The slice returned by ZeroCopyReadPacketData points to bytes owned by the
+// the Handle.  Each call to ZeroCopyReadPacketData invalidates any data previously
+// returned by ZeroCopyReadPacketData.  Care must be taken not to keep pointers
+// to old bytes when using ZeroCopyReadPacketData... if you need to keep data past
+// the next time you call ZeroCopyReadPacketData, use ReadPacketData, which copies
+// the bytes into a new buffer for you.
+//  data1, _, _ := handle.ZeroCopyReadPacketData()
+//  // do everything you want with data1 here, copying bytes out of it if you'd like to keep them around.
+//  data2, _, _ := handle.ZeroCopyReadPacketData()  // invalidates bytes in data1
+func (p *Handle) ZeroCopyReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
+	p.mu.Lock()
+	err = p.getNextBufPtrLocked(&ci)
+	if err == nil {
+		slice := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+		slice.Data = uintptr(unsafe.Pointer(p.bufptr))
+		slice.Len = ci.CaptureLength
+		slice.Cap = ci.CaptureLength
+	}
+	p.mu.Unlock()
+	if err == NextErrorTimeoutExpired {
+		runtime.Gosched()
+	}
+	return
+}
+
+// Close closes the underlying pcap handle.
+func (p *Handle) Close() {
+	p.closeMu.Lock()
+	defer p.closeMu.Unlock()
+
+	if !p.isOpen() {
+		return
+	}
+
+	atomic.StoreUint64(&p.stop, 1)
+
+	// wait for packet reader to stop
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.pcapClose()
+}
+
+// Error returns the current error associated with a pcap handle (pcap_geterr).
+func (p *Handle) Error() error {
+	return p.pcapGeterr()
+}
+
+// Stats returns statistics on the underlying pcap handle.
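+// An illustrative sketch (assumes an open *Handle named handle):
+//  if stats, err := handle.Stats(); err == nil {
+//    fmt.Printf("recv=%d drop=%d ifdrop=%d\n",
+//      stats.PacketsReceived, stats.PacketsDropped, stats.PacketsIfDropped)
+//  }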
+func (p *Handle) Stats() (stat *Stats, err error) {
+	return p.pcapStats()
+}
+
+// ListDataLinks obtains a list of all possible data link types supported for an interface.
+func (p *Handle) ListDataLinks() (datalinks []Datalink, err error) {
+	return p.pcapListDatalinks()
+}
+
+// compileBPFFilter always returns an allocated C.struct_bpf_program.
+// It is the caller's responsibility to free the memory again, e.g.
+//
+//    C.pcap_freecode(&bpf)
+//
+func (p *Handle) compileBPFFilter(expr string) (pcapBpfProgram, error) {
+	var maskp = uint32(pcapNetmaskUnknown)
+
+	// Only do the lookup on network interfaces.
+	// No device indicates we're handling a pcap file.
+	if len(p.device) > 0 {
+		var err error
+		_, maskp, err = pcapLookupnet(p.device)
+		if err != nil {
+			// We can't lookup the network, but that could be because the interface
+			// doesn't have an IPv4.
+			maskp = uint32(pcapNetmaskUnknown)
+		}
+	}
+
+	return p.pcapCompile(expr, maskp)
+}
+
+// CompileBPFFilter compiles and returns a BPF filter, given a link type and capture length.
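+// An illustrative sketch (assumes an Ethernet link and a 65535-byte capture length):
+//  instrs, err := pcap.CompileBPFFilter(layers.LinkTypeEthernet, 65535, "tcp and port 80")
+//  if err == nil {
+//    fmt.Println("compiled", len(instrs), "BPF instructions")
+//  }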
+func CompileBPFFilter(linkType layers.LinkType, captureLength int, expr string) ([]BPFInstruction, error) {
+	h, err := pcapOpenDead(linkType, captureLength)
+	if err != nil {
+		return nil, err
+	}
+	defer h.Close()
+	return h.CompileBPFFilter(expr)
+}
+
+// CompileBPFFilter compiles and returns a BPF filter for the pcap handle.
+func (p *Handle) CompileBPFFilter(expr string) ([]BPFInstruction, error) {
+	bpf, err := p.compileBPFFilter(expr)
+	defer bpf.free()
+	if err != nil {
+		return nil, err
+	}
+
+	return bpf.toBPFInstruction(), nil
+}
+
+// SetBPFFilter compiles and sets a BPF filter for the pcap handle.
+func (p *Handle) SetBPFFilter(expr string) (err error) {
+	bpf, err := p.compileBPFFilter(expr)
+	defer bpf.free()
+	if err != nil {
+		return err
+	}
+
+	return p.pcapSetfilter(bpf)
+}
+
+// SetBPFInstructionFilter may be used to apply a filter in BPF asm byte code format.
+//
+// Simplest way to generate BPF asm byte code is with tcpdump:
+//     tcpdump -dd 'udp'
+//
+// The output may be used directly to add a filter, e.g.:
+//     bpfInstructions := []pcap.BPFInstruction{
+//			{0x28, 0, 0, 0x0000000c},
+//			{0x15, 0, 9, 0x00000800},
+//			{0x30, 0, 0, 0x00000017},
+//			{0x15, 0, 7, 0x00000006},
+//			{0x28, 0, 0, 0x00000014},
+//			{0x45, 5, 0, 0x00001fff},
+//			{0xb1, 0, 0, 0x0000000e},
+//			{0x50, 0, 0, 0x0000001b},
+//			{0x54, 0, 0, 0x00000012},
+//			{0x15, 0, 1, 0x00000012},
+//			{0x6, 0, 0, 0x0000ffff},
+//			{0x6, 0, 0, 0x00000000},
+//		}
+//
+// Another possibility is to write the BPF code in bpf asm.
+// Documentation: https://www.kernel.org/doc/Documentation/networking/filter.txt
+//
+// To compile the code use bpf_asm from
+// https://github.com/torvalds/linux/tree/master/tools/net
+//
+// The following command may be used to convert bpf_asm output to a C/Go struct, usable for SetBPFInstructionFilter:
+// bpf_asm -c tcp.bpf
+func (p *Handle) SetBPFInstructionFilter(bpfInstructions []BPFInstruction) (err error) {
+	bpf, err := bpfInstructionFilter(bpfInstructions)
+	if err != nil {
+		return err
+	}
+	defer bpf.free()
+
+	return p.pcapSetfilter(bpf)
+}
+
+func bpfInstructionFilter(bpfInstructions []BPFInstruction) (bpf pcapBpfProgram, err error) {
+	if len(bpfInstructions) < 1 {
+		return bpf, errors.New("bpfInstructions must not be empty")
+	}
+
+	if len(bpfInstructions) > MaxBpfInstructions {
+		return bpf, fmt.Errorf("bpfInstructions must not be larger than %d", MaxBpfInstructions)
+	}
+
+	return pcapBpfProgramFromInstructions(bpfInstructions), nil
+}
+
+// NewBPF compiles the given string into a new filter program.
+//
+// BPF filters need to be created from activated handles, because they need to
+// know the underlying link type to correctly compile their offsets.
+func (p *Handle) NewBPF(expr string) (*BPF, error) {
+	bpf := &BPF{orig: expr}
+
+	var err error
+	bpf.bpf, err = p.pcapCompile(expr, pcapNetmaskUnknown)
+	if err != nil {
+		return nil, err
+	}
+
+	runtime.SetFinalizer(bpf, destroyBPF)
+	return bpf, nil
+}
+
+// NewBPF allows creating a BPF without requiring an existing handle.
+// This makes it possible to match packets obtained from a non-gopacket
+// capture source.
+//
+// 	buf := make([]byte, MaxFrameSize)
+// 	bpfi, _ := pcap.NewBPF(layers.LinkTypeEthernet, MaxFrameSize, "icmp")
+// 	n, _ := someIO.Read(buf)
+// 	ci := gopacket.CaptureInfo{CaptureLength: n, Length: n}
+// 	if bpfi.Matches(ci, buf) {
+// 		doSomething()
+// 	}
+func NewBPF(linkType layers.LinkType, captureLength int, expr string) (*BPF, error) {
+	h, err := pcapOpenDead(linkType, captureLength)
+	if err != nil {
+		return nil, err
+	}
+	defer h.Close()
+	return h.NewBPF(expr)
+}
+
+// NewBPFInstructionFilter sets the given BPFInstructions as a new filter program.
+//
+// For more details, see SetBPFInstructionFilter.
+//
+// BPF filters need to be created from activated handles, because they need to
+// know the underlying link type to correctly compile their offsets.
+func (p *Handle) NewBPFInstructionFilter(bpfInstructions []BPFInstruction) (*BPF, error) {
+	var err error
+	bpf := &BPF{orig: "BPF Instruction Filter"}
+
+	bpf.bpf, err = bpfInstructionFilter(bpfInstructions)
+	if err != nil {
+		return nil, err
+	}
+
+	runtime.SetFinalizer(bpf, destroyBPF)
+	return bpf, nil
+}
+
+func destroyBPF(bpf *BPF) {
+	bpf.bpf.free()
+}
+
+// String returns the original string this BPF filter was compiled from.
+func (b *BPF) String() string {
+	return b.orig
+}
+
+// Matches returns true if the given packet data matches this filter.
+func (b *BPF) Matches(ci gopacket.CaptureInfo, data []byte) bool {
+	return b.pcapOfflineFilter(ci, data)
+}
+
+// Version returns pcap_lib_version.
+func Version() string {
+	return pcapLibVersion()
+}
+
+// LinkType returns pcap_datalink, as a layers.LinkType.
+func (p *Handle) LinkType() layers.LinkType {
+	return p.pcapDatalink()
+}
+
+// SetLinkType calls pcap_set_datalink on the pcap handle.
+func (p *Handle) SetLinkType(dlt layers.LinkType) error {
+	return p.pcapSetDatalink(dlt)
+}
+
+// DatalinkValToName returns pcap_datalink_val_to_name as string
+func DatalinkValToName(dlt int) string {
+	return pcapDatalinkValToName(dlt)
+}
+
+// DatalinkValToDescription returns pcap_datalink_val_to_description as string
+func DatalinkValToDescription(dlt int) string {
+	return pcapDatalinkValToDescription(dlt)
+}
+
+// DatalinkNameToVal returns pcap_datalink_name_to_val as int
+func DatalinkNameToVal(name string) int {
+	return pcapDatalinkNameToVal(name)
+}
+
+// FindAllDevs attempts to enumerate all interfaces on the current machine.
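+// An illustrative sketch (assumes this package imported as pcap):
+//  devs, err := pcap.FindAllDevs()
+//  if err != nil {
+//    log.Fatal(err)
+//  }
+//  for _, d := range devs {
+//    fmt.Println(d.Name, d.Description)
+//  }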
+func FindAllDevs() (ifs []Interface, err error) {
+	alldevsp, err := pcapFindAllDevs()
+	if err != nil {
+		return nil, err
+	}
+	defer alldevsp.free()
+
+	for alldevsp.next() {
+		var iface Interface
+		iface.Name = alldevsp.name()
+		iface.Description = alldevsp.description()
+		iface.Addresses = findalladdresses(alldevsp.addresses())
+		iface.Flags = alldevsp.flags()
+		ifs = append(ifs, iface)
+	}
+	return
+}
+
+func findalladdresses(addresses pcapAddresses) (retval []InterfaceAddress) {
+	// TODO - make it support more than IPv4 and IPv6?
+	retval = make([]InterfaceAddress, 0, 1)
+	for addresses.next() {
+		// Strangely, it appears that in some cases, we get a pcap address back from
+		// pcap_findalldevs with a nil .addr.  It appears that we can skip over
+		// these.
+		if addresses.addr() == nil {
+			continue
+		}
+		var a InterfaceAddress
+		var err error
+		if a.IP, err = sockaddrToIP(addresses.addr()); err != nil {
+			continue
+		}
+		// To be safe, we'll also check for netmask.
+		if addresses.netmask() == nil {
+			continue
+		}
+		if a.Netmask, err = sockaddrToIP(addresses.netmask()); err != nil {
+			// If we got an IP address but we can't get a netmask, just return the IP
+			// address.
+			a.Netmask = nil
+		}
+		if a.Broadaddr, err = sockaddrToIP(addresses.broadaddr()); err != nil {
+			a.Broadaddr = nil
+		}
+		if a.P2P, err = sockaddrToIP(addresses.dstaddr()); err != nil {
+			a.P2P = nil
+		}
+		retval = append(retval, a)
+	}
+	return
+}
+
+func sockaddrToIP(rsa *syscall.RawSockaddr) (IP []byte, err error) {
+	if rsa == nil {
+		err = errors.New("Value not set")
+		return
+	}
+	switch rsa.Family {
+	case syscall.AF_INET:
+		pp := (*syscall.RawSockaddrInet4)(unsafe.Pointer(rsa))
+		IP = make([]byte, 4)
+		for i := 0; i < len(IP); i++ {
+			IP[i] = pp.Addr[i]
+		}
+		return
+	case syscall.AF_INET6:
+		pp := (*syscall.RawSockaddrInet6)(unsafe.Pointer(rsa))
+		IP = make([]byte, 16)
+		for i := 0; i < len(IP); i++ {
+			IP[i] = pp.Addr[i]
+		}
+		return
+	}
+	err = errors.New("Unsupported address type")
+	return
+}
+
+// WritePacketData calls pcap_sendpacket, injecting the given data into the pcap handle.
+func (p *Handle) WritePacketData(data []byte) (err error) {
+	return p.pcapSendpacket(data)
+}
+
+// Direction is used by Handle.SetDirection.
+type Direction uint8
+
+// Direction values for Handle.SetDirection.
+const (
+	DirectionIn    = Direction(pcapDIN)
+	DirectionOut   = Direction(pcapDOUT)
+	DirectionInOut = Direction(pcapDINOUT)
+)
+
+// SetDirection sets the direction for which packets will be captured.
+func (p *Handle) SetDirection(direction Direction) error {
+	if direction != DirectionIn && direction != DirectionOut && direction != DirectionInOut {
+		return fmt.Errorf("Invalid direction: %v", direction)
+	}
+	return p.pcapSetdirection(direction)
+}
+
+// SnapLen returns the snapshot length
+func (p *Handle) SnapLen() int {
+	return p.pcapSnapshot()
+}
+
+// Resolution returns the timestamp resolution of acquired timestamps before scaling to NanosecondTimestampResolution.
+func (p *Handle) Resolution() gopacket.TimestampResolution {
+	if p.nanoSecsFactor == 1 {
+		return gopacket.TimestampResolutionMicrosecond
+	}
+	return gopacket.TimestampResolutionNanosecond
+}
+
+// TimestampSource tells PCAP which type of timestamp to use for packets.
+type TimestampSource int
+
+// String returns the timestamp type as a human-readable string.
+func (t TimestampSource) String() string {
+	return t.pcapTstampTypeValToName()
+}
+
+// TimestampSourceFromString translates a string into a timestamp type, case
+// insensitive.
+func TimestampSourceFromString(s string) (TimestampSource, error) {
+	return pcapTstampTypeNameToVal(s)
+}
+
+// InactiveHandle allows you to call pre-pcap_activate functions on your pcap
+// handle to set it up just the way you'd like.
+type InactiveHandle struct {
+	// cptr is the handle for the actual pcap C object.
+	cptr        pcapTPtr
+	device      string
+	deviceIndex int
+	timeout     time.Duration
+}
+
+// holds the error message in case activation returned a warning
+var activateErrMsg error
+
+// Error returns the current error associated with a pcap handle (pcap_geterr).
+func (p *InactiveHandle) Error() error {
+	return p.pcapGeterr()
+}
+
+// Activate activates the handle.  The current InactiveHandle becomes invalid
+// and all future function calls on it will fail.
+func (p *InactiveHandle) Activate() (*Handle, error) {
+	// ignore error with set_tstamp_precision, since the actual precision is queried later anyway
+	pcapSetTstampPrecision(p.cptr, pcapTstampPrecisionNano)
+	handle, err := p.pcapActivate()
+	if err != aeNoError {
+		if err == aeWarning {
+			activateErrMsg = p.Error()
+		}
+		return nil, err
+	}
+	handle.timeout = p.timeout
+	if p.timeout > 0 {
+		if err := handle.setNonBlocking(); err != nil {
+			handle.pcapClose()
+			return nil, err
+		}
+	}
+	handle.device = p.device
+	handle.deviceIndex = p.deviceIndex
+	if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+		handle.nanoSecsFactor = 1
+	} else {
+		handle.nanoSecsFactor = 1000
+	}
+	return handle, nil
+}
+
+// CleanUp cleans up any stuff left over from a successful or failed building
+// of a handle.
+func (p *InactiveHandle) CleanUp() {
+	p.pcapClose()
+}
+
+// NewInactiveHandle creates a new InactiveHandle, which wraps an un-activated PCAP handle.
+// Callers of NewInactiveHandle should immediately defer 'CleanUp', as in:
+//   inactive := NewInactiveHandle("eth0")
+//   defer inactive.CleanUp()
+func NewInactiveHandle(device string) (*InactiveHandle, error) {
+	// Try to get the interface index, but it could be something like "any"
+	// in which case use 0, which doesn't exist in nature
+	deviceIndex := 0
+	ifc, err := net.InterfaceByName(device)
+	if err == nil {
+		deviceIndex = ifc.Index
+	}
+
+	// This copies a bunch of the pcap_open_live implementation from pcap.c:
+	handle, err := pcapCreate(device)
+	if err != nil {
+		return nil, err
+	}
+	handle.device = device
+	handle.deviceIndex = deviceIndex
+	return handle, nil
+}
+
+// SetSnapLen sets the snap length (max bytes per packet to capture).
+func (p *InactiveHandle) SetSnapLen(snaplen int) error {
+	return p.pcapSetSnaplen(snaplen)
+}
+
+// SetPromisc sets the handle to either be promiscuous (capture packets
+// unrelated to this host) or not.
+func (p *InactiveHandle) SetPromisc(promisc bool) error {
+	return p.pcapSetPromisc(promisc)
+}
+
+// SetTimeout sets the read timeout for the handle.
+//
+// See the package documentation for important details regarding 'timeout'.
+func (p *InactiveHandle) SetTimeout(timeout time.Duration) error {
+	err := p.pcapSetTimeout(timeout)
+	if err != nil {
+		return err
+	}
+	p.timeout = timeout
+	return nil
+}
+
+// SupportedTimestamps returns a list of supported timestamp types for this
+// handle.
+func (p *InactiveHandle) SupportedTimestamps() (out []TimestampSource) {
+	return p.pcapListTstampTypes()
+}
+
+// SetTimestampSource sets the type of timestamp generator PCAP uses when
+// attaching timestamps to packets.
+func (p *InactiveHandle) SetTimestampSource(t TimestampSource) error {
+	return p.pcapSetTstampType(t)
+}
+
+// CannotSetRFMon is returned by SetRFMon if the handle does not allow
+// setting RFMon because pcap_can_set_rfmon returns 0.
+var CannotSetRFMon = errors.New("Cannot set rfmon for this handle")
+
+// SetRFMon turns on radio monitoring mode, similar to promiscuous mode but for
+// wireless networks.  If this mode is enabled, the interface will not need to
+// associate with an access point before it can receive traffic.
+func (p *InactiveHandle) SetRFMon(monitor bool) error {
+	return p.pcapSetRfmon(monitor)
+}
+
+// SetBufferSize sets the buffer size (in bytes) of the handle.
+func (p *InactiveHandle) SetBufferSize(bufferSize int) error {
+	return p.pcapSetBufferSize(bufferSize)
+}
+
+// SetImmediateMode sets (or unsets) the immediate mode of the
+// handle. In immediate mode, packets are delivered to the application
+// as soon as they arrive.  In other words, this overrides SetTimeout.
+func (p *InactiveHandle) SetImmediateMode(mode bool) error {
+	return p.pcapSetImmediateMode(mode)
+}
diff --git a/vendor/github.com/google/gopacket/pcap/pcap_unix.go b/vendor/github.com/google/gopacket/pcap/pcap_unix.go
new file mode 100644
index 0000000..4d6a4fb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap_unix.go
@@ -0,0 +1,709 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+// +build !windows
+
+package pcap
+
+import (
+	"errors"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/google/gopacket"
+
+	"github.com/google/gopacket/layers"
+)
+
+/*
+#cgo solaris LDFLAGS: -L /opt/local/lib -lpcap
+#cgo linux LDFLAGS: -lpcap
+#cgo dragonfly LDFLAGS: -lpcap
+#cgo freebsd LDFLAGS: -lpcap
+#cgo openbsd LDFLAGS: -lpcap
+#cgo netbsd LDFLAGS: -lpcap
+#cgo darwin LDFLAGS: -lpcap
+#include <stdlib.h>
+#include <pcap.h>
+#include <stdint.h>
+
+// Some old versions of pcap don't define this constant.
+#ifndef PCAP_NETMASK_UNKNOWN
+#define PCAP_NETMASK_UNKNOWN 0xffffffff
+#endif
+
+// libpcap doesn't actually export its version in a #define-guardable way,
+// so we have to use other defined things to differentiate versions.
+// We assume at least libpcap v1.1 at the moment.
+// See http://upstream-tracker.org/versions/libpcap.html
+
+#ifndef PCAP_ERROR_TSTAMP_PRECISION_NOTSUP  // < v1.5
+#define PCAP_ERROR_TSTAMP_PRECISION_NOTSUP -12
+
+int pcap_set_immediate_mode(pcap_t *p, int mode) {
+  return PCAP_ERROR;
+}
+
+//  libpcap version < v1.5 doesn't have timestamp precision (everything is microsecond)
+//
+//  This means *_tstamp_* functions and macros are missing. Therefore, we emulate these
+//  functions here and pretend that setting the precision works. This is actually the way
+//  pcap_open_offline_with_tstamp_precision works, because it doesn't return an error
+//  if it was not possible to set the precision, which depends on support by the given file.
+//  => The rest of the functions always pretend as if they could set nano precision and
+//  verify the actual precision with pcap_get_tstamp_precision, which is emulated for <v1.5
+//  to always return micro resolution.
+
+#define PCAP_TSTAMP_PRECISION_MICRO	0
+#define PCAP_TSTAMP_PRECISION_NANO	1
+
+pcap_t *pcap_open_offline_with_tstamp_precision(const char *fname, u_int precision,
+  char *errbuf) {
+  return pcap_open_offline(fname, errbuf);
+}
+
+pcap_t *pcap_fopen_offline_with_tstamp_precision(FILE *fp, u_int precision,
+  char *errbuf) {
+  return pcap_fopen_offline(fp, errbuf);
+}
+
+int pcap_set_tstamp_precision(pcap_t *p, int tstamp_precision) {
+  if (tstamp_precision == PCAP_TSTAMP_PRECISION_MICRO)
+    return 0;
+  return PCAP_ERROR_TSTAMP_PRECISION_NOTSUP;
+}
+
+int pcap_get_tstamp_precision(pcap_t *p) {
+  return PCAP_TSTAMP_PRECISION_MICRO;
+}
+
+#ifndef PCAP_TSTAMP_HOST  // < v1.2
+
+int pcap_set_tstamp_type(pcap_t* p, int t) { return -1; }
+int pcap_list_tstamp_types(pcap_t* p, int** t) { return 0; }
+void pcap_free_tstamp_types(int *tstamp_types) {}
+const char* pcap_tstamp_type_val_to_name(int t) {
+	return "pcap timestamp types not supported";
+}
+int pcap_tstamp_type_name_to_val(const char* t) {
+	return PCAP_ERROR;
+}
+
+#endif  // < v1.2
+#endif  // < v1.5
+
+#ifndef PCAP_ERROR_PROMISC_PERM_DENIED
+#define PCAP_ERROR_PROMISC_PERM_DENIED -11
+#endif
+
+// Windows, Macs, and Linux all use different time types.  Joy.
+#ifdef __APPLE__
+#define gopacket_time_secs_t __darwin_time_t
+#define gopacket_time_usecs_t __darwin_suseconds_t
+#elif __ANDROID__
+#define gopacket_time_secs_t __kernel_time_t
+#define gopacket_time_usecs_t __kernel_suseconds_t
+#elif __GLIBC__
+#define gopacket_time_secs_t __time_t
+#define gopacket_time_usecs_t __suseconds_t
+#else  // Some form of linux/bsd/etc...
+#include <sys/param.h>
+#ifdef __OpenBSD__
+#define gopacket_time_secs_t u_int32_t
+#define gopacket_time_usecs_t u_int32_t
+#else
+#define gopacket_time_secs_t time_t
+#define gopacket_time_usecs_t suseconds_t
+#endif
+#endif
+
+// The things we do to avoid pointers escaping to the heap...
+// According to https://github.com/the-tcpdump-group/libpcap/blob/1131a7c26c6f4d4772e4a2beeaf7212f4dea74ac/pcap.c#L398-L406 ,
+// the return value of pcap_next_ex could be greater than 1 for success.
+// Let's just clamp it to 1 if it comes back greater than 1.
+int pcap_next_ex_escaping(pcap_t *p, uintptr_t pkt_hdr, uintptr_t pkt_data) {
+  int ex = pcap_next_ex(p, (struct pcap_pkthdr**)(pkt_hdr), (const u_char**)(pkt_data));
+  if (ex > 1) {
+    ex = 1;
+  }
+  return ex;
+}
+
+int pcap_offline_filter_escaping(struct bpf_program *fp, uintptr_t pkt_hdr, uintptr_t pkt) {
+	return pcap_offline_filter(fp, (struct pcap_pkthdr*)(pkt_hdr), (const u_char*)(pkt));
+}
+
+// pcap_wait returns when the next packet is available or the timeout expires.
+// Since it uses pcap_get_selectable_fd, it will not work in Windows.
+int pcap_wait(pcap_t *p, int usec) {
+	fd_set fds;
+	int fd;
+	struct timeval tv;
+
+	fd = pcap_get_selectable_fd(p);
+	if(fd < 0) {
+		return fd;
+	}
+
+	FD_ZERO(&fds);
+	FD_SET(fd, &fds);
+
+	tv.tv_sec = 0;
+	tv.tv_usec = usec;
+
+	if(usec != 0) {
+		return select(fd+1, &fds, NULL, NULL, &tv);
+	}
+
+	// block indefinitely if no timeout provided
+	return select(fd+1, &fds, NULL, NULL, NULL);
+}
+
+*/
+import "C"
+
+const errorBufferSize = C.PCAP_ERRBUF_SIZE
+
+const (
+	pcapErrorNotActivated    = C.PCAP_ERROR_NOT_ACTIVATED
+	pcapErrorActivated       = C.PCAP_ERROR_ACTIVATED
+	pcapWarningPromisc       = C.PCAP_WARNING_PROMISC_NOTSUP
+	pcapErrorNoSuchDevice    = C.PCAP_ERROR_NO_SUCH_DEVICE
+	pcapErrorDenied          = C.PCAP_ERROR_PERM_DENIED
+	pcapErrorNotUp           = C.PCAP_ERROR_IFACE_NOT_UP
+	pcapWarning              = C.PCAP_WARNING
+	pcapDIN                  = C.PCAP_D_IN
+	pcapDOUT                 = C.PCAP_D_OUT
+	pcapDINOUT               = C.PCAP_D_INOUT
+	pcapNetmaskUnknown       = C.PCAP_NETMASK_UNKNOWN
+	pcapTstampPrecisionMicro = C.PCAP_TSTAMP_PRECISION_MICRO
+	pcapTstampPrecisionNano  = C.PCAP_TSTAMP_PRECISION_NANO
+)
+
+type pcapPkthdr C.struct_pcap_pkthdr
+type pcapTPtr *C.struct_pcap
+type pcapBpfProgram C.struct_bpf_program
+
+func (h *pcapPkthdr) getSec() int64 {
+	return int64(h.ts.tv_sec)
+}
+
+func (h *pcapPkthdr) getUsec() int64 {
+	return int64(h.ts.tv_usec)
+}
+
+func (h *pcapPkthdr) getLen() int {
+	return int(h.len)
+}
+
+func (h *pcapPkthdr) getCaplen() int {
+	return int(h.caplen)
+}
+
+func pcapGetTstampPrecision(cptr pcapTPtr) int {
+	return int(C.pcap_get_tstamp_precision(cptr))
+}
+
+func pcapSetTstampPrecision(cptr pcapTPtr, precision int) error {
+	ret := C.pcap_set_tstamp_precision(cptr, C.int(precision))
+	if ret < 0 {
+		return errors.New(C.GoString(C.pcap_geterr(cptr)))
+	}
+	return nil
+}
+
+func statusError(status C.int) error {
+	return errors.New(C.GoString(C.pcap_statustostr(status)))
+}
+
+func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, error) {
+	buf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+
+	dev := C.CString(device)
+	defer C.free(unsafe.Pointer(dev))
+
+	cptr := C.pcap_open_live(dev, C.int(snaplen), C.int(pro), C.int(timeout), buf)
+	if cptr == nil {
+		return nil, errors.New(C.GoString(buf))
+	}
+	return &Handle{cptr: cptr}, nil
+}
+
+func openOffline(file string) (handle *Handle, err error) {
+	buf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+	cf := C.CString(file)
+	defer C.free(unsafe.Pointer(cf))
+
+	cptr := C.pcap_open_offline_with_tstamp_precision(cf, C.PCAP_TSTAMP_PRECISION_NANO, buf)
+	if cptr == nil {
+		return nil, errors.New(C.GoString(buf))
+	}
+	return &Handle{cptr: cptr}, nil
+}
+
+func (p *Handle) pcapClose() {
+	if p.cptr != nil {
+		C.pcap_close(p.cptr)
+	}
+	p.cptr = nil
+}
+
+func (p *Handle) pcapGeterr() error {
+	return errors.New(C.GoString(C.pcap_geterr(p.cptr)))
+}
+
+func (p *Handle) pcapStats() (*Stats, error) {
+	var cstats C.struct_pcap_stat
+	if C.pcap_stats(p.cptr, &cstats) < 0 {
+		return nil, p.pcapGeterr()
+	}
+	return &Stats{
+		PacketsReceived:  int(cstats.ps_recv),
+		PacketsDropped:   int(cstats.ps_drop),
+		PacketsIfDropped: int(cstats.ps_ifdrop),
+	}, nil
+}
+
+// for libpcap < 1.8 pcap_compile is NOT thread-safe, so protect it.
+var pcapCompileMu sync.Mutex
+
+func (p *Handle) pcapCompile(expr string, maskp uint32) (pcapBpfProgram, error) {
+	var bpf pcapBpfProgram
+	cexpr := C.CString(expr)
+	defer C.free(unsafe.Pointer(cexpr))
+
+	pcapCompileMu.Lock()
+	defer pcapCompileMu.Unlock()
+	if C.pcap_compile(p.cptr, (*C.struct_bpf_program)(&bpf), cexpr, 1, C.bpf_u_int32(maskp)) < 0 {
+		return bpf, p.pcapGeterr()
+	}
+	return bpf, nil
+}
+
+func (p pcapBpfProgram) free() {
+	C.pcap_freecode((*C.struct_bpf_program)(&p))
+}
+
+func (p pcapBpfProgram) toBPFInstruction() []BPFInstruction {
+	bpfInsn := (*[bpfInstructionBufferSize]C.struct_bpf_insn)(unsafe.Pointer(p.bf_insns))[0:p.bf_len:p.bf_len]
+	bpfInstruction := make([]BPFInstruction, len(bpfInsn), len(bpfInsn))
+
+	for i, v := range bpfInsn {
+		bpfInstruction[i].Code = uint16(v.code)
+		bpfInstruction[i].Jt = uint8(v.jt)
+		bpfInstruction[i].Jf = uint8(v.jf)
+		bpfInstruction[i].K = uint32(v.k)
+	}
+	return bpfInstruction
+}
+
+func pcapBpfProgramFromInstructions(bpfInstructions []BPFInstruction) pcapBpfProgram {
+	var bpf pcapBpfProgram
+	bpf.bf_len = C.u_int(len(bpfInstructions))
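+	// The instructions are allocated in C memory rather than as a Go slice so
+	// that the pointer stored in bf_insns stays valid for libpcap and is never
+	// moved or freed by the Go runtime.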
+	cbpfInsns := C.calloc(C.size_t(len(bpfInstructions)), C.size_t(unsafe.Sizeof(bpfInstructions[0])))
+	gbpfInsns := (*[bpfInstructionBufferSize]C.struct_bpf_insn)(cbpfInsns)
+
+	for i, v := range bpfInstructions {
+		gbpfInsns[i].code = C.u_short(v.Code)
+		gbpfInsns[i].jt = C.u_char(v.Jt)
+		gbpfInsns[i].jf = C.u_char(v.Jf)
+		gbpfInsns[i].k = C.bpf_u_int32(v.K)
+	}
+
+	bpf.bf_insns = (*C.struct_bpf_insn)(cbpfInsns)
+	return bpf
+}
+
+func pcapLookupnet(device string) (netp, maskp uint32, err error) {
+	errorBuf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(errorBuf))
+	dev := C.CString(device)
+	defer C.free(unsafe.Pointer(dev))
+	if C.pcap_lookupnet(
+		dev,
+		(*C.bpf_u_int32)(unsafe.Pointer(&netp)),
+		(*C.bpf_u_int32)(unsafe.Pointer(&maskp)),
+		errorBuf,
+	) < 0 {
+		// We can't look up the network, but that could be because the interface
+		// doesn't have an IPv4 address.
+		return 0, 0, errors.New(C.GoString(errorBuf))
+	}
+	return
+}
+
+func (b *BPF) pcapOfflineFilter(ci gopacket.CaptureInfo, data []byte) bool {
+	hdr := (*C.struct_pcap_pkthdr)(&b.hdr)
+	hdr.ts.tv_sec = C.gopacket_time_secs_t(ci.Timestamp.Unix())
+	hdr.ts.tv_usec = C.gopacket_time_usecs_t(ci.Timestamp.Nanosecond() / 1000)
+	hdr.caplen = C.bpf_u_int32(len(data)) // Trust actual length over ci.Length.
+	hdr.len = C.bpf_u_int32(ci.Length)
+	dataptr := (*C.u_char)(unsafe.Pointer(&data[0]))
+	return C.pcap_offline_filter_escaping((*C.struct_bpf_program)(&b.bpf),
+		C.uintptr_t(uintptr(unsafe.Pointer(hdr))),
+		C.uintptr_t(uintptr(unsafe.Pointer(dataptr)))) != 0
+}
+
+func (p *Handle) pcapSetfilter(bpf pcapBpfProgram) error {
+	if C.pcap_setfilter(p.cptr, (*C.struct_bpf_program)(&bpf)) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func (p *Handle) pcapListDatalinks() (datalinks []Datalink, err error) {
+	var dltbuf *C.int
+
+	n := int(C.pcap_list_datalinks(p.cptr, &dltbuf))
+	if n < 0 {
+		return nil, p.pcapGeterr()
+	}
+
+	defer C.pcap_free_datalinks(dltbuf)
+
+	datalinks = make([]Datalink, n)
+
+	dltArray := (*[1 << 28]C.int)(unsafe.Pointer(dltbuf))
+
+	for i := 0; i < n; i++ {
+		datalinks[i].Name = pcapDatalinkValToName(int((*dltArray)[i]))
+		datalinks[i].Description = pcapDatalinkValToDescription(int((*dltArray)[i]))
+	}
+
+	return datalinks, nil
+}
+
+func pcapOpenDead(linkType layers.LinkType, captureLength int) (*Handle, error) {
+	cptr := C.pcap_open_dead(C.int(linkType), C.int(captureLength))
+	if cptr == nil {
+		return nil, errors.New("error opening dead capture")
+	}
+
+	return &Handle{cptr: cptr}, nil
+}
+
+func (p *Handle) pcapNextPacketEx() NextError {
+	// This horrible magic allows us to pass a ptr-to-ptr to pcap_next_ex
+	// without causing that ptr-to-ptr to itself be allocated on the heap.
+	// Since Handle itself survives through the duration of the pcap_next_ex
+	// call, this should be perfectly safe for GC stuff, etc.
+
+	return NextError(C.pcap_next_ex_escaping(p.cptr, C.uintptr_t(uintptr(unsafe.Pointer(&p.pkthdr))), C.uintptr_t(uintptr(unsafe.Pointer(&p.bufptr)))))
+}
+
+func (p *Handle) pcapDatalink() layers.LinkType {
+	return layers.LinkType(C.pcap_datalink(p.cptr))
+}
+
+func (p *Handle) pcapSetDatalink(dlt layers.LinkType) error {
+	if C.pcap_set_datalink(p.cptr, C.int(dlt)) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func pcapDatalinkValToName(dlt int) string {
+	return C.GoString(C.pcap_datalink_val_to_name(C.int(dlt)))
+}
+
+func pcapDatalinkValToDescription(dlt int) string {
+	return C.GoString(C.pcap_datalink_val_to_description(C.int(dlt)))
+}
+
+func pcapDatalinkNameToVal(name string) int {
+	cptr := C.CString(name)
+	defer C.free(unsafe.Pointer(cptr))
+	return int(C.pcap_datalink_name_to_val(cptr))
+}
+
+func pcapLibVersion() string {
+	return C.GoString(C.pcap_lib_version())
+}
+
+func (p *Handle) isOpen() bool {
+	return p.cptr != nil
+}
+
+type pcapDevices struct {
+	all, cur *C.pcap_if_t
+}
+
+func (p pcapDevices) free() {
+	C.pcap_freealldevs((*C.pcap_if_t)(p.all))
+}
+
+func (p *pcapDevices) next() bool {
+	if p.cur == nil {
+		p.cur = p.all
+		if p.cur == nil {
+			return false
+		}
+		return true
+	}
+	if p.cur.next == nil {
+		return false
+	}
+	p.cur = p.cur.next
+	return true
+}
+
+func (p pcapDevices) name() string {
+	return C.GoString(p.cur.name)
+}
+
+func (p pcapDevices) description() string {
+	return C.GoString(p.cur.description)
+}
+
+func (p pcapDevices) flags() uint32 {
+	return uint32(p.cur.flags)
+}
+
+type pcapAddresses struct {
+	all, cur *C.pcap_addr_t
+}
+
+func (p *pcapAddresses) next() bool {
+	if p.cur == nil {
+		p.cur = p.all
+		if p.cur == nil {
+			return false
+		}
+		return true
+	}
+	if p.cur.next == nil {
+		return false
+	}
+	p.cur = p.cur.next
+	return true
+}
+
+func (p pcapAddresses) addr() *syscall.RawSockaddr {
+	return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.addr))
+}
+
+func (p pcapAddresses) netmask() *syscall.RawSockaddr {
+	return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.netmask))
+}
+
+func (p pcapAddresses) broadaddr() *syscall.RawSockaddr {
+	return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.broadaddr))
+}
+
+func (p pcapAddresses) dstaddr() *syscall.RawSockaddr {
+	return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.dstaddr))
+}
+
+func (p pcapDevices) addresses() pcapAddresses {
+	return pcapAddresses{all: p.cur.addresses}
+}
+
+func pcapFindAllDevs() (pcapDevices, error) {
+	var buf *C.char
+	buf = (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+	var alldevsp pcapDevices
+
+	if C.pcap_findalldevs((**C.pcap_if_t)(&alldevsp.all), buf) < 0 {
+		return pcapDevices{}, errors.New(C.GoString(buf))
+	}
+	return alldevsp, nil
+}
+
+func (p *Handle) pcapSendpacket(data []byte) error {
+	if C.pcap_sendpacket(p.cptr, (*C.u_char)(&data[0]), (C.int)(len(data))) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func (p *Handle) pcapSetdirection(direction Direction) error {
+	if status := C.pcap_setdirection(p.cptr, (C.pcap_direction_t)(direction)); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *Handle) pcapSnapshot() int {
+	return int(C.pcap_snapshot(p.cptr))
+}
+
+func (t TimestampSource) pcapTstampTypeValToName() string {
+	return C.GoString(C.pcap_tstamp_type_val_to_name(C.int(t)))
+}
+
+func pcapTstampTypeNameToVal(s string) (TimestampSource, error) {
+	cs := C.CString(s)
+	defer C.free(unsafe.Pointer(cs))
+	t := C.pcap_tstamp_type_name_to_val(cs)
+	if t < 0 {
+		return 0, statusError(t)
+	}
+	return TimestampSource(t), nil
+}
+
+func (p *InactiveHandle) pcapGeterr() error {
+	return errors.New(C.GoString(C.pcap_geterr(p.cptr)))
+}
+
+func (p *InactiveHandle) pcapActivate() (*Handle, activateError) {
+	ret := activateError(C.pcap_activate(p.cptr))
+	if ret != aeNoError {
+		return nil, ret
+	}
+	h := &Handle{
+		cptr: p.cptr,
+	}
+	p.cptr = nil
+	return h, ret
+}
+
+func (p *InactiveHandle) pcapClose() {
+	if p.cptr != nil {
+		C.pcap_close(p.cptr)
+	}
+}
+
+func pcapCreate(device string) (*InactiveHandle, error) {
+	buf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+	dev := C.CString(device)
+	defer C.free(unsafe.Pointer(dev))
+
+	cptr := C.pcap_create(dev, buf)
+	if cptr == nil {
+		return nil, errors.New(C.GoString(buf))
+	}
+	return &InactiveHandle{cptr: cptr}, nil
+}
+
+func (p *InactiveHandle) pcapSetSnaplen(snaplen int) error {
+	if status := C.pcap_set_snaplen(p.cptr, C.int(snaplen)); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetPromisc(promisc bool) error {
+	var pro C.int
+	if promisc {
+		pro = 1
+	}
+	if status := C.pcap_set_promisc(p.cptr, pro); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetTimeout(timeout time.Duration) error {
+	if status := C.pcap_set_timeout(p.cptr, C.int(timeoutMillis(timeout))); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapListTstampTypes() (out []TimestampSource) {
+	var types *C.int
+	n := int(C.pcap_list_tstamp_types(p.cptr, &types))
+	if n < 0 {
+		return // public interface doesn't have error :(
+	}
+	defer C.pcap_free_tstamp_types(types)
+	typesArray := (*[1 << 28]C.int)(unsafe.Pointer(types))
+	for i := 0; i < n; i++ {
+		out = append(out, TimestampSource((*typesArray)[i]))
+	}
+	return
+}
+
+func (p *InactiveHandle) pcapSetTstampType(t TimestampSource) error {
+	if status := C.pcap_set_tstamp_type(p.cptr, C.int(t)); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetRfmon(monitor bool) error {
+	var mon C.int
+	if monitor {
+		mon = 1
+	}
+	switch canset := C.pcap_can_set_rfmon(p.cptr); canset {
+	case 0:
+		return CannotSetRFMon
+	case 1:
+		// success
+	default:
+		return statusError(canset)
+	}
+	if status := C.pcap_set_rfmon(p.cptr, mon); status != 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetBufferSize(bufferSize int) error {
+	if status := C.pcap_set_buffer_size(p.cptr, C.int(bufferSize)); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetImmediateMode(mode bool) error {
+	var md C.int
+	if mode {
+		md = 1
+	}
+	if status := C.pcap_set_immediate_mode(p.cptr, md); status < 0 {
+		return statusError(status)
+	}
+	return nil
+}
+
+func (p *Handle) setNonBlocking() error {
+	buf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+
+	// Change the device to non-blocking, we'll use pcap_wait to wait until the
+	// handle is ready to read.
+	if v := C.pcap_setnonblock(p.cptr, 1, buf); v < 0 {
+		return errors.New(C.GoString(buf))
+	}
+
+	return nil
+}
+
+// waitForPacket waits for a packet or for the timeout to expire.
+func (p *Handle) waitForPacket() {
+	// need to wait less than the read timeout according to pcap documentation.
+	// timeoutMillis rounds up to at least one millisecond so we can safely
+	// subtract up to a millisecond.
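+	// For example, a 5ms read timeout yields usec = 5*1000 - 100 = 4900.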
+	usec := timeoutMillis(p.timeout) * 1000
+	usec -= 100
+
+	C.pcap_wait(p.cptr, C.int(usec))
+}
+
+// openOfflineFile returns contents of input file as a *Handle.
+func openOfflineFile(file *os.File) (handle *Handle, err error) {
+	buf := (*C.char)(C.calloc(errorBufferSize, 1))
+	defer C.free(unsafe.Pointer(buf))
+	cmode := C.CString("rb")
+	defer C.free(unsafe.Pointer(cmode))
+	cf := C.fdopen(C.int(file.Fd()), cmode)
+
+	cptr := C.pcap_fopen_offline_with_tstamp_precision(cf, C.PCAP_TSTAMP_PRECISION_NANO, buf)
+	if cptr == nil {
+		return nil, errors.New(C.GoString(buf))
+	}
+	return &Handle{cptr: cptr}, nil
+}
diff --git a/vendor/github.com/google/gopacket/pcap/pcap_windows.go b/vendor/github.com/google/gopacket/pcap/pcap_windows.go
new file mode 100644
index 0000000..0a53338
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap_windows.go
@@ -0,0 +1,885 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package pcap
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+)
+
+var pcapLoaded = false
+
+const npcapPath = "\\Npcap"
+
+func initDllPath(kernel32 syscall.Handle) {
+	setDllDirectory, err := syscall.GetProcAddress(kernel32, "SetDllDirectoryA")
+	if err != nil {
+		// we can't do anything since SetDllDirectoryA is missing - fall back to use first wpcap.dll we encounter
+		return
+	}
+	getSystemDirectory, err := syscall.GetProcAddress(kernel32, "GetSystemDirectoryA")
+	if err != nil {
+		// we can't do anything since GetSystemDirectoryA is missing - fall back to use first wpcap.dll we encounter
+		return
+	}
+	buf := make([]byte, 4096)
+	r, _, _ := syscall.Syscall(getSystemDirectory, 2, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0)
+	if r == 0 || r > 4096-uintptr(len(npcapPath))-1 {
+		// the system directory lookup failed or the path is too long - fall back to use first wpcap.dll we encounter
+		return
+	}
+	copy(buf[r:], npcapPath)
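+	// buf now holds "<system dir>\Npcap"; pointing the DLL search path there
+	// prefers an installed Npcap wpcap.dll over an older WinPcap copy.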
+	_, _, _ = syscall.Syscall(setDllDirectory, 1, uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+	// ignore errors here - we just fallback to load wpcap.dll from default locations
+}
+
+// loadedDllPath will hold the full pathname of the loaded wpcap.dll after init if possible
+var loadedDllPath = "wpcap.dll"
+
+func initLoadedDllPath(kernel32 syscall.Handle) {
+	getModuleFileName, err := syscall.GetProcAddress(kernel32, "GetModuleFileNameA")
+	if err != nil {
+		// we can't get the filename of the loaded module in this case - just leave default of wpcap.dll
+		return
+	}
+	buf := make([]byte, 4096)
+	r, _, _ := syscall.Syscall(getModuleFileName, 3, uintptr(wpcapHandle), uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+	if r == 0 {
+		// we can't get the filename of the loaded module in this case - just leave default of wpcap.dll
+		return
+	}
+	loadedDllPath = string(buf[:int(r)])
+}
+
+func mustLoad(fun string) uintptr {
+	addr, err := syscall.GetProcAddress(wpcapHandle, fun)
+	if err != nil {
+		panic(fmt.Sprintf("Couldn't load function %s from %s", fun, loadedDllPath))
+	}
+	return addr
+}
+
+func mightLoad(fun string) uintptr {
+	addr, err := syscall.GetProcAddress(wpcapHandle, fun)
+	if err != nil {
+		return 0
+	}
+	return addr
+}
+
+func byteSliceToString(bval []byte) string {
+	for i := range bval {
+		if bval[i] == 0 {
+			return string(bval[:i])
+		}
+	}
+	return string(bval[:])
+}
+
+// bytePtrToString returns a string copied from a pointer to a null-terminated byte array.
+// WARNING: ONLY SAFE IF r POINTS TO C MEMORY!
+// govet will complain about this function for the reason stated above
+func bytePtrToString(r uintptr) string {
+	if r == 0 {
+		return ""
+	}
+	bval := (*[1 << 30]byte)(unsafe.Pointer(r))
+	return byteSliceToString(bval[:])
+}
+
+var wpcapHandle syscall.Handle
+var msvcrtHandle syscall.Handle
+var (
+	callocPtr,
+	pcapStrerrorPtr,
+	pcapStatustostrPtr,
+	pcapOpenLivePtr,
+	pcapOpenOfflinePtr,
+	pcapClosePtr,
+	pcapGeterrPtr,
+	pcapStatsPtr,
+	pcapCompilePtr,
+	pcapFreecodePtr,
+	pcapLookupnetPtr,
+	pcapOfflineFilterPtr,
+	pcapSetfilterPtr,
+	pcapListDatalinksPtr,
+	pcapFreeDatalinksPtr,
+	pcapDatalinkValToNamePtr,
+	pcapDatalinkValToDescriptionPtr,
+	pcapOpenDeadPtr,
+	pcapNextExPtr,
+	pcapDatalinkPtr,
+	pcapSetDatalinkPtr,
+	pcapDatalinkNameToValPtr,
+	pcapLibVersionPtr,
+	pcapFreealldevsPtr,
+	pcapFindalldevsPtr,
+	pcapSendpacketPtr,
+	pcapSetdirectionPtr,
+	pcapSnapshotPtr,
+	pcapTstampTypeValToNamePtr,
+	pcapTstampTypeNameToValPtr,
+	pcapListTstampTypesPtr,
+	pcapFreeTstampTypesPtr,
+	pcapSetTstampTypePtr,
+	pcapGetTstampPrecisionPtr,
+	pcapSetTstampPrecisionPtr,
+	pcapOpenOfflineWithTstampPrecisionPtr,
+	pcapHOpenOfflineWithTstampPrecisionPtr,
+	pcapActivatePtr,
+	pcapCreatePtr,
+	pcapSetSnaplenPtr,
+	pcapSetPromiscPtr,
+	pcapSetTimeoutPtr,
+	pcapCanSetRfmonPtr,
+	pcapSetRfmonPtr,
+	pcapSetBufferSizePtr,
+	pcapSetImmediateModePtr,
+	pcapHopenOfflinePtr uintptr
+)
+
+func init() {
+	LoadWinPCAP()
+}
+
+// LoadWinPCAP attempts to dynamically load the wpcap DLL and resolve necessary functions
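+// It is safe to call more than once: after the first successful load it
+// returns nil immediately, which is why the functions in this file call it
+// again before use to surface any error that the package init() ignored.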
+func LoadWinPCAP() error {
+	if pcapLoaded {
+		return nil
+	}
+
+	kernel32, err := syscall.LoadLibrary("kernel32.dll")
+	if err != nil {
+		return fmt.Errorf("couldn't load kernel32.dll")
+	}
+	defer syscall.FreeLibrary(kernel32)
+
+	initDllPath(kernel32)
+
+	wpcapHandle, err = syscall.LoadLibrary("wpcap.dll")
+	if err != nil {
+		return fmt.Errorf("couldn't load wpcap.dll")
+	}
+	initLoadedDllPath(kernel32)
+	msvcrtHandle, err = syscall.LoadLibrary("msvcrt.dll")
+	if err != nil {
+		return fmt.Errorf("couldn't load msvcrt.dll")
+	}
+	callocPtr, err = syscall.GetProcAddress(msvcrtHandle, "calloc")
+	if err != nil {
+		return fmt.Errorf("couldn't get calloc function")
+	}
+
+	pcapStrerrorPtr = mustLoad("pcap_strerror")
+	pcapStatustostrPtr = mightLoad("pcap_statustostr") // not available on winpcap
+	pcapOpenLivePtr = mustLoad("pcap_open_live")
+	pcapOpenOfflinePtr = mustLoad("pcap_open_offline")
+	pcapClosePtr = mustLoad("pcap_close")
+	pcapGeterrPtr = mustLoad("pcap_geterr")
+	pcapStatsPtr = mustLoad("pcap_stats")
+	pcapCompilePtr = mustLoad("pcap_compile")
+	pcapFreecodePtr = mustLoad("pcap_freecode")
+	pcapLookupnetPtr = mustLoad("pcap_lookupnet")
+	pcapOfflineFilterPtr = mustLoad("pcap_offline_filter")
+	pcapSetfilterPtr = mustLoad("pcap_setfilter")
+	pcapListDatalinksPtr = mustLoad("pcap_list_datalinks")
+	pcapFreeDatalinksPtr = mustLoad("pcap_free_datalinks")
+	pcapDatalinkValToNamePtr = mustLoad("pcap_datalink_val_to_name")
+	pcapDatalinkValToDescriptionPtr = mustLoad("pcap_datalink_val_to_description")
+	pcapOpenDeadPtr = mustLoad("pcap_open_dead")
+	pcapNextExPtr = mustLoad("pcap_next_ex")
+	pcapDatalinkPtr = mustLoad("pcap_datalink")
+	pcapSetDatalinkPtr = mustLoad("pcap_set_datalink")
+	pcapDatalinkNameToValPtr = mustLoad("pcap_datalink_name_to_val")
+	pcapLibVersionPtr = mustLoad("pcap_lib_version")
+	pcapFreealldevsPtr = mustLoad("pcap_freealldevs")
+	pcapFindalldevsPtr = mustLoad("pcap_findalldevs")
+	pcapSendpacketPtr = mustLoad("pcap_sendpacket")
+	pcapSetdirectionPtr = mustLoad("pcap_setdirection")
+	pcapSnapshotPtr = mustLoad("pcap_snapshot")
+	//libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+	pcapTstampTypeValToNamePtr = mightLoad("pcap_tstamp_type_val_to_name")
+	pcapTstampTypeNameToValPtr = mightLoad("pcap_tstamp_type_name_to_val")
+	pcapListTstampTypesPtr = mightLoad("pcap_list_tstamp_types")
+	pcapFreeTstampTypesPtr = mightLoad("pcap_free_tstamp_types")
+	pcapSetTstampTypePtr = mightLoad("pcap_set_tstamp_type")
+	pcapGetTstampPrecisionPtr = mightLoad("pcap_get_tstamp_precision")
+	pcapSetTstampPrecisionPtr = mightLoad("pcap_set_tstamp_precision")
+	pcapOpenOfflineWithTstampPrecisionPtr = mightLoad("pcap_open_offline_with_tstamp_precision")
+	pcapHOpenOfflineWithTstampPrecisionPtr = mightLoad("pcap_hopen_offline_with_tstamp_precision")
+	pcapActivatePtr = mustLoad("pcap_activate")
+	pcapCreatePtr = mustLoad("pcap_create")
+	pcapSetSnaplenPtr = mustLoad("pcap_set_snaplen")
+	pcapSetPromiscPtr = mustLoad("pcap_set_promisc")
+	pcapSetTimeoutPtr = mustLoad("pcap_set_timeout")
+	//winpcap does not support rfmon
+	pcapCanSetRfmonPtr = mightLoad("pcap_can_set_rfmon")
+	pcapSetRfmonPtr = mightLoad("pcap_set_rfmon")
+	pcapSetBufferSizePtr = mustLoad("pcap_set_buffer_size")
+	//libpcap <1.5 does not have pcap_set_immediate_mode
+	pcapSetImmediateModePtr = mightLoad("pcap_set_immediate_mode")
+	pcapHopenOfflinePtr = mustLoad("pcap_hopen_offline")
+
+	pcapLoaded = true
+	return nil
+}
+
+func (h *pcapPkthdr) getSec() int64 {
+	return int64(h.Ts.Sec)
+}
+
+func (h *pcapPkthdr) getUsec() int64 {
+	return int64(h.Ts.Usec)
+}
+
+func (h *pcapPkthdr) getLen() int {
+	return int(h.Len)
+}
+
+func (h *pcapPkthdr) getCaplen() int {
+	return int(h.Caplen)
+}
+
+func statusError(status pcapCint) error {
+	var ret uintptr
+	if pcapStatustostrPtr == 0 {
+		ret, _, _ = syscall.Syscall(pcapStrerrorPtr, 1, uintptr(status), 0, 0)
+	} else {
+		ret, _, _ = syscall.Syscall(pcapStatustostrPtr, 1, uintptr(status), 0, 0)
+	}
+	return errors.New(bytePtrToString(ret))
+}
+
+func pcapGetTstampPrecision(cptr pcapTPtr) int {
+	if pcapGetTstampPrecisionPtr == 0 {
+		return pcapTstampPrecisionMicro
+	}
+	ret, _, _ := syscall.Syscall(pcapGetTstampPrecisionPtr, 1, uintptr(cptr), 0, 0)
+	return int(pcapCint(ret))
+}
+
+func pcapSetTstampPrecision(cptr pcapTPtr, precision int) error {
+	if pcapSetTstampPrecisionPtr == 0 {
+		return errors.New("Not supported")
+	}
+	ret, _, _ := syscall.Syscall(pcapSetTstampPrecisionPtr, 2, uintptr(cptr), uintptr(precision), 0)
+	if pcapCint(ret) < 0 {
+		return errors.New("Not supported")
+	}
+	return nil
+}
+
+func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, error) {
+	err := LoadWinPCAP()
+	if err != nil {
+		return nil, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+	dev, err := syscall.BytePtrFromString(device)
+	if err != nil {
+		return nil, err
+	}
+
+	cptr, _, _ := syscall.Syscall6(pcapOpenLivePtr, 5, uintptr(unsafe.Pointer(dev)), uintptr(snaplen), uintptr(pro), uintptr(timeout), uintptr(unsafe.Pointer(&buf[0])), 0)
+
+	if cptr == 0 {
+		return nil, errors.New(byteSliceToString(buf))
+	}
+	return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func openOffline(file string) (handle *Handle, err error) {
+	err = LoadWinPCAP()
+	if err != nil {
+		return nil, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+	f, err := syscall.BytePtrFromString(file)
+	if err != nil {
+		return nil, err
+	}
+
+	var cptr uintptr
+	if pcapOpenOfflineWithTstampPrecisionPtr == 0 {
+		cptr, _, _ = syscall.Syscall(pcapOpenOfflinePtr, 2, uintptr(unsafe.Pointer(f)), uintptr(unsafe.Pointer(&buf[0])), 0)
+	} else {
+		cptr, _, _ = syscall.Syscall(pcapOpenOfflineWithTstampPrecisionPtr, 3, uintptr(unsafe.Pointer(f)), uintptr(pcapTstampPrecisionNano), uintptr(unsafe.Pointer(&buf[0])))
+	}
+
+	if cptr == 0 {
+		return nil, errors.New(byteSliceToString(buf))
+	}
+
+	h := &Handle{cptr: pcapTPtr(cptr)}
+	return h, nil
+}
+
+func (p *Handle) pcapClose() {
+	if p.cptr != 0 {
+		_, _, _ = syscall.Syscall(pcapClosePtr, 1, uintptr(p.cptr), 0, 0)
+	}
+	p.cptr = 0
+}
+
+func (p *Handle) pcapGeterr() error {
+	ret, _, _ := syscall.Syscall(pcapGeterrPtr, 1, uintptr(p.cptr), 0, 0)
+	return errors.New(bytePtrToString(ret))
+}
+
+func (p *Handle) pcapStats() (*Stats, error) {
+	var cstats pcapStats
+	ret, _, _ := syscall.Syscall(pcapStatsPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&cstats)), 0)
+	if pcapCint(ret) < 0 {
+		return nil, p.pcapGeterr()
+	}
+	return &Stats{
+		PacketsReceived:  int(cstats.Recv),
+		PacketsDropped:   int(cstats.Drop),
+		PacketsIfDropped: int(cstats.Ifdrop),
+	}, nil
+}
+
+// for libpcap < 1.8 pcap_compile is NOT thread-safe, so protect it.
+var pcapCompileMu sync.Mutex
+
+func (p *Handle) pcapCompile(expr string, maskp uint32) (pcapBpfProgram, error) {
+	var bpf pcapBpfProgram
+	cexpr, err := syscall.BytePtrFromString(expr)
+	if err != nil {
+		return pcapBpfProgram{}, err
+	}
+	pcapCompileMu.Lock()
+	defer pcapCompileMu.Unlock()
+	res, _, _ := syscall.Syscall6(pcapCompilePtr, 5, uintptr(p.cptr), uintptr(unsafe.Pointer(&bpf)), uintptr(unsafe.Pointer(cexpr)), uintptr(1), uintptr(maskp), 0)
+	if pcapCint(res) < 0 {
+		return bpf, p.pcapGeterr()
+	}
+	return bpf, nil
+}
+
+func (p pcapBpfProgram) free() {
+	_, _, _ = syscall.Syscall(pcapFreecodePtr, 1, uintptr(unsafe.Pointer(&p)), 0, 0)
+}
+
+func (p pcapBpfProgram) toBPFInstruction() []BPFInstruction {
+	bpfInsn := (*[bpfInstructionBufferSize]pcapBpfInstruction)(unsafe.Pointer(p.Insns))[0:p.Len:p.Len]
+	bpfInstruction := make([]BPFInstruction, len(bpfInsn), len(bpfInsn))
+
+	for i, v := range bpfInsn {
+		bpfInstruction[i].Code = v.Code
+		bpfInstruction[i].Jt = v.Jt
+		bpfInstruction[i].Jf = v.Jf
+		bpfInstruction[i].K = v.K
+	}
+	return bpfInstruction
+}
+
+func pcapBpfProgramFromInstructions(bpfInstructions []BPFInstruction) pcapBpfProgram {
+	var bpf pcapBpfProgram
+	bpf.Len = uint32(len(bpfInstructions))
+	cbpfInsns, _, _ := syscall.Syscall(callocPtr, 2, uintptr(len(bpfInstructions)), uintptr(unsafe.Sizeof(bpfInstructions[0])), 0)
+	gbpfInsns := (*[bpfInstructionBufferSize]pcapBpfInstruction)(unsafe.Pointer(cbpfInsns))
+
+	for i, v := range bpfInstructions {
+		gbpfInsns[i].Code = v.Code
+		gbpfInsns[i].Jt = v.Jt
+		gbpfInsns[i].Jf = v.Jf
+		gbpfInsns[i].K = v.K
+	}
+
+	bpf.Insns = (*pcapBpfInstruction)(unsafe.Pointer(cbpfInsns))
+	return bpf
+}
+
+func pcapLookupnet(device string) (netp, maskp uint32, err error) {
+	err = LoadWinPCAP()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+	dev, err := syscall.BytePtrFromString(device)
+	if err != nil {
+		return 0, 0, err
+	}
+	e, _, _ := syscall.Syscall6(pcapLookupnetPtr, 4, uintptr(unsafe.Pointer(dev)), uintptr(unsafe.Pointer(&netp)), uintptr(unsafe.Pointer(&maskp)), uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+	if pcapCint(e) < 0 {
+		return 0, 0, errors.New(byteSliceToString(buf))
+	}
+	return
+}
+
+func (b *BPF) pcapOfflineFilter(ci gopacket.CaptureInfo, data []byte) bool {
+	var hdr pcapPkthdr
+	hdr.Ts.Sec = int32(ci.Timestamp.Unix())
+	hdr.Ts.Usec = int32(ci.Timestamp.Nanosecond() / 1000)
+	hdr.Caplen = uint32(len(data)) // Trust actual length over ci.Length.
+	hdr.Len = uint32(ci.Length)
+	e, _, _ := syscall.Syscall(pcapOfflineFilterPtr, 3, uintptr(unsafe.Pointer(&b.bpf)), uintptr(unsafe.Pointer(&hdr)), uintptr(unsafe.Pointer(&data[0])))
+	return e != 0
+}
+
+func (p *Handle) pcapSetfilter(bpf pcapBpfProgram) error {
+	e, _, _ := syscall.Syscall(pcapSetfilterPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&bpf)), 0)
+	if pcapCint(e) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func (p *Handle) pcapListDatalinks() (datalinks []Datalink, err error) {
+	var dltbuf *pcapCint
+	ret, _, _ := syscall.Syscall(pcapListDatalinksPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&dltbuf)), 0)
+
+	n := int(pcapCint(ret))
+
+	if n < 0 {
+		return nil, p.pcapGeterr()
+	}
+	defer syscall.Syscall(pcapFreeDatalinksPtr, 1, uintptr(unsafe.Pointer(dltbuf)), 0, 0)
+
+	datalinks = make([]Datalink, n)
+
+	dltArray := (*[1 << 28]pcapCint)(unsafe.Pointer(dltbuf))
+
+	for i := 0; i < n; i++ {
+		datalinks[i].Name = pcapDatalinkValToName(int((*dltArray)[i]))
+		datalinks[i].Description = pcapDatalinkValToDescription(int((*dltArray)[i]))
+	}
+
+	return datalinks, nil
+}
+
+func pcapOpenDead(linkType layers.LinkType, captureLength int) (*Handle, error) {
+	err := LoadWinPCAP()
+	if err != nil {
+		return nil, err
+	}
+
+	cptr, _, _ := syscall.Syscall(pcapOpenDeadPtr, 2, uintptr(linkType), uintptr(captureLength), 0)
+	if cptr == 0 {
+		return nil, errors.New("error opening dead capture")
+	}
+
+	return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func (p *Handle) pcapNextPacketEx() NextError {
+	r, _, _ := syscall.Syscall(pcapNextExPtr, 3, uintptr(p.cptr), uintptr(unsafe.Pointer(&p.pkthdr)), uintptr(unsafe.Pointer(&p.bufptr)))
+	ret := pcapCint(r)
+	// According to https://github.com/the-tcpdump-group/libpcap/blob/1131a7c26c6f4d4772e4a2beeaf7212f4dea74ac/pcap.c#L398-L406 ,
+	// the return value of pcap_next_ex can be greater than 1 on success.
+	// Normalize any value greater than 1 to 1.
+	if ret > 1 {
+		ret = 1
+	}
+	return NextError(ret)
+}
+
+func (p *Handle) pcapDatalink() layers.LinkType {
+	ret, _, _ := syscall.Syscall(pcapDatalinkPtr, 1, uintptr(p.cptr), 0, 0)
+	return layers.LinkType(ret)
+}
+
+func (p *Handle) pcapSetDatalink(dlt layers.LinkType) error {
+	ret, _, _ := syscall.Syscall(pcapSetDatalinkPtr, 2, uintptr(p.cptr), uintptr(dlt), 0)
+	if pcapCint(ret) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func pcapDatalinkValToName(dlt int) string {
+	err := LoadWinPCAP()
+	if err != nil {
+		panic(err)
+	}
+	ret, _, _ := syscall.Syscall(pcapDatalinkValToNamePtr, 1, uintptr(dlt), 0, 0)
+	return bytePtrToString(ret)
+}
+
+func pcapDatalinkValToDescription(dlt int) string {
+	err := LoadWinPCAP()
+	if err != nil {
+		panic(err)
+	}
+	ret, _, _ := syscall.Syscall(pcapDatalinkValToDescriptionPtr, 1, uintptr(dlt), 0, 0)
+	return bytePtrToString(ret)
+}
+
+func pcapDatalinkNameToVal(name string) int {
+	err := LoadWinPCAP()
+	if err != nil {
+		panic(err)
+	}
+	cptr, err := syscall.BytePtrFromString(name)
+	if err != nil {
+		return 0
+	}
+	ret, _, _ := syscall.Syscall(pcapDatalinkNameToValPtr, 1, uintptr(unsafe.Pointer(cptr)), 0, 0)
+	return int(pcapCint(ret))
+}
+
+func pcapLibVersion() string {
+	err := LoadWinPCAP()
+	if err != nil {
+		panic(err)
+	}
+	ret, _, _ := syscall.Syscall(pcapLibVersionPtr, 0, 0, 0, 0)
+	return bytePtrToString(ret)
+}
+
+func (p *Handle) isOpen() bool {
+	return p.cptr != 0
+}
+
+type pcapDevices struct {
+	all, cur *pcapIf
+}
+
+func (p pcapDevices) free() {
+	syscall.Syscall(pcapFreealldevsPtr, 1, uintptr(unsafe.Pointer(p.all)), 0, 0)
+}
+
+func (p *pcapDevices) next() bool {
+	if p.cur == nil {
+		p.cur = p.all
+		if p.cur == nil {
+			return false
+		}
+		return true
+	}
+	if p.cur.Next == nil {
+		return false
+	}
+	p.cur = p.cur.Next
+	return true
+}
+
+func (p pcapDevices) name() string {
+	return bytePtrToString(uintptr(unsafe.Pointer(p.cur.Name)))
+}
+
+func (p pcapDevices) description() string {
+	return bytePtrToString(uintptr(unsafe.Pointer(p.cur.Description)))
+}
+
+func (p pcapDevices) flags() uint32 {
+	return p.cur.Flags
+}
+
+type pcapAddresses struct {
+	all, cur *pcapAddr
+}
+
+func (p *pcapAddresses) next() bool {
+	if p.cur == nil {
+		p.cur = p.all
+		if p.cur == nil {
+			return false
+		}
+		return true
+	}
+	if p.cur.Next == nil {
+		return false
+	}
+	p.cur = p.cur.Next
+	return true
+}
+
+func (p pcapAddresses) addr() *syscall.RawSockaddr {
+	return p.cur.Addr
+}
+
+func (p pcapAddresses) netmask() *syscall.RawSockaddr {
+	return p.cur.Netmask
+}
+
+func (p pcapAddresses) broadaddr() *syscall.RawSockaddr {
+	return p.cur.Broadaddr
+}
+
+func (p pcapAddresses) dstaddr() *syscall.RawSockaddr {
+	return p.cur.Dstaddr
+}
+
+func (p pcapDevices) addresses() pcapAddresses {
+	return pcapAddresses{all: p.cur.Addresses}
+}
+
+func pcapFindAllDevs() (pcapDevices, error) {
+	var alldevsp pcapDevices
+	err := LoadWinPCAP()
+	if err != nil {
+		return alldevsp, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+
+	ret, _, _ := syscall.Syscall(pcapFindalldevsPtr, 2, uintptr(unsafe.Pointer(&alldevsp.all)), uintptr(unsafe.Pointer(&buf[0])), 0)
+
+	if pcapCint(ret) < 0 {
+		return pcapDevices{}, errors.New(byteSliceToString(buf))
+	}
+	return alldevsp, nil
+}
+
+func (p *Handle) pcapSendpacket(data []byte) error {
+	ret, _, _ := syscall.Syscall(pcapSendpacketPtr, 3, uintptr(p.cptr), uintptr(unsafe.Pointer(&data[0])), uintptr(len(data)))
+	if pcapCint(ret) < 0 {
+		return p.pcapGeterr()
+	}
+	return nil
+}
+
+func (p *Handle) pcapSetdirection(direction Direction) error {
+	status, _, _ := syscall.Syscall(pcapSetdirectionPtr, 2, uintptr(p.cptr), uintptr(direction), 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *Handle) pcapSnapshot() int {
+	ret, _, _ := syscall.Syscall(pcapSnapshotPtr, 1, uintptr(p.cptr), 0, 0)
+	return int(pcapCint(ret))
+}
+
+func (t TimestampSource) pcapTstampTypeValToName() string {
+	err := LoadWinPCAP()
+	if err != nil {
+		return err.Error()
+	}
+
+	//libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+	if pcapTstampTypeValToNamePtr == 0 {
+		return "pcap timestamp types not supported"
+	}
+	ret, _, _ := syscall.Syscall(pcapTstampTypeValToNamePtr, 1, uintptr(t), 0, 0)
+	return bytePtrToString(ret)
+}
+
+func pcapTstampTypeNameToVal(s string) (TimestampSource, error) {
+	err := LoadWinPCAP()
+	if err != nil {
+		return 0, err
+	}
+
+	//libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+	if pcapTstampTypeNameToValPtr == 0 {
+		return 0, statusError(pcapCint(pcapError))
+	}
+	cs, err := syscall.BytePtrFromString(s)
+	if err != nil {
+		return 0, err
+	}
+	ret, _, _ := syscall.Syscall(pcapTstampTypeNameToValPtr, 1, uintptr(unsafe.Pointer(cs)), 0, 0)
+	t := pcapCint(ret)
+	if t < 0 {
+		return 0, statusError(pcapCint(t))
+	}
+	return TimestampSource(t), nil
+}
+
+func (p *InactiveHandle) pcapGeterr() error {
+	ret, _, _ := syscall.Syscall(pcapGeterrPtr, 1, uintptr(p.cptr), 0, 0)
+	return errors.New(bytePtrToString(ret))
+}
+
+func (p *InactiveHandle) pcapActivate() (*Handle, activateError) {
+	r, _, _ := syscall.Syscall(pcapActivatePtr, 1, uintptr(p.cptr), 0, 0)
+	ret := activateError(pcapCint(r))
+	if ret != aeNoError {
+		return nil, ret
+	}
+	h := &Handle{
+		cptr: p.cptr,
+	}
+	p.cptr = 0
+	return h, ret
+}
+
+func (p *InactiveHandle) pcapClose() {
+	if p.cptr != 0 {
+		_, _, _ = syscall.Syscall(pcapClosePtr, 1, uintptr(p.cptr), 0, 0)
+	}
+	p.cptr = 0
+}
+
+func pcapCreate(device string) (*InactiveHandle, error) {
+	err := LoadWinPCAP()
+	if err != nil {
+		return nil, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+	dev, err := syscall.BytePtrFromString(device)
+	if err != nil {
+		return nil, err
+	}
+	cptr, _, _ := syscall.Syscall(pcapCreatePtr, 2, uintptr(unsafe.Pointer(dev)), uintptr(unsafe.Pointer(&buf[0])), 0)
+	if cptr == 0 {
+		return nil, errors.New(byteSliceToString(buf))
+	}
+	return &InactiveHandle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func (p *InactiveHandle) pcapSetSnaplen(snaplen int) error {
+	status, _, _ := syscall.Syscall(pcapSetSnaplenPtr, 2, uintptr(p.cptr), uintptr(snaplen), 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetPromisc(promisc bool) error {
+	var pro uintptr
+	if promisc {
+		pro = 1
+	}
+	status, _, _ := syscall.Syscall(pcapSetPromiscPtr, 2, uintptr(p.cptr), pro, 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetTimeout(timeout time.Duration) error {
+	status, _, _ := syscall.Syscall(pcapSetTimeoutPtr, 2, uintptr(p.cptr), uintptr(timeoutMillis(timeout)), 0)
+
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapListTstampTypes() (out []TimestampSource) {
+	//libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+	if pcapListTstampTypesPtr == 0 {
+		return
+	}
+	var types *pcapCint
+	ret, _, _ := syscall.Syscall(pcapListTstampTypesPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&types)), 0)
+	n := int(pcapCint(ret))
+	if n < 0 {
+		return // public interface doesn't have error :(
+	}
+	defer syscall.Syscall(pcapFreeTstampTypesPtr, 1, uintptr(unsafe.Pointer(types)), 0, 0)
+	typesArray := (*[1 << 28]pcapCint)(unsafe.Pointer(types))
+	for i := 0; i < n; i++ {
+		out = append(out, TimestampSource((*typesArray)[i]))
+	}
+	return
+}
+
+func (p *InactiveHandle) pcapSetTstampType(t TimestampSource) error {
+	//libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+	if pcapSetTstampTypePtr == 0 {
+		return statusError(pcapError)
+	}
+	status, _, _ := syscall.Syscall(pcapSetTstampTypePtr, 2, uintptr(p.cptr), uintptr(t), 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetRfmon(monitor bool) error {
+	//winpcap does not support rfmon
+	if pcapCanSetRfmonPtr == 0 {
+		return CannotSetRFMon
+	}
+	var mon uintptr
+	if monitor {
+		mon = 1
+	}
+	canset, _, _ := syscall.Syscall(pcapCanSetRfmonPtr, 1, uintptr(p.cptr), 0, 0)
+	switch canset {
+	case 0:
+		return CannotSetRFMon
+	case 1:
+		// success
+	default:
+		return statusError(pcapCint(canset))
+	}
+	status, _, _ := syscall.Syscall(pcapSetRfmonPtr, 2, uintptr(p.cptr), mon, 0)
+	if status != 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetBufferSize(bufferSize int) error {
+	status, _, _ := syscall.Syscall(pcapSetBufferSizePtr, 2, uintptr(p.cptr), uintptr(bufferSize), 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *InactiveHandle) pcapSetImmediateMode(mode bool) error {
+	//libpcap <1.5 does not have pcap_set_immediate_mode
+	if pcapSetImmediateModePtr == 0 {
+		return statusError(pcapError)
+	}
+	var md uintptr
+	if mode {
+		md = 1
+	}
+	status, _, _ := syscall.Syscall(pcapSetImmediateModePtr, 2, uintptr(p.cptr), md, 0)
+	if pcapCint(status) < 0 {
+		return statusError(pcapCint(status))
+	}
+	return nil
+}
+
+func (p *Handle) setNonBlocking() error {
+	// do nothing
+	return nil
+}
+
+// waitForPacket waits for a packet or for the timeout to expire.
+func (p *Handle) waitForPacket() {
+	// can't use select() so instead just switch goroutines
+	runtime.Gosched()
+}
+
+// openOfflineFile returns contents of input file as a *Handle.
+func openOfflineFile(file *os.File) (handle *Handle, err error) {
+	err = LoadWinPCAP()
+	if err != nil {
+		return nil, err
+	}
+
+	buf := make([]byte, errorBufferSize)
+	cf := file.Fd()
+
+	var cptr uintptr
+	if pcapOpenOfflineWithTstampPrecisionPtr == 0 {
+		cptr, _, _ = syscall.Syscall(pcapHopenOfflinePtr, 2, cf, uintptr(unsafe.Pointer(&buf[0])), 0)
+	} else {
+		cptr, _, _ = syscall.Syscall(pcapHOpenOfflineWithTstampPrecisionPtr, 3, cf, uintptr(pcapTstampPrecisionNano), uintptr(unsafe.Pointer(&buf[0])))
+	}
+
+	if cptr == 0 {
+		return nil, errors.New(byteSliceToString(buf))
+	}
+	return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
diff --git a/vendor/github.com/google/gopacket/pcap/test_dns.pcap b/vendor/github.com/google/gopacket/pcap/test_dns.pcap
new file mode 100644
index 0000000..3a79f92
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/test_dns.pcap
Binary files differ
diff --git a/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap b/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap
new file mode 100644
index 0000000..1d01bd9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap
Binary files differ
diff --git a/vendor/github.com/google/gopacket/pcap/test_loopback.pcap b/vendor/github.com/google/gopacket/pcap/test_loopback.pcap
new file mode 100644
index 0000000..ddeb82c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/test_loopback.pcap
Binary files differ
diff --git a/vendor/github.com/google/gopacket/time.go b/vendor/github.com/google/gopacket/time.go
new file mode 100644
index 0000000..6d116cd
--- /dev/null
+++ b/vendor/github.com/google/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
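+// For example, a resolution of {Base: 10, Exponent: -9} is one nanosecond.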
+type TimestampResolution struct {
+	Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+	return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
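+// For example, TimestampResolution{10, -3} yields time.Millisecond and
+// TimestampResolution{2, -1} yields 500 * time.Millisecond.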
+func (t TimestampResolution) ToDuration() time.Duration {
+	if t.Base == 0 {
+		return 0
+	}
+	if t.Exponent == 0 {
+		return time.Second
+	}
+	switch t.Base {
+	case 10:
+		return time.Duration(math.Pow10(t.Exponent + 9))
+	case 2:
+		if t.Exponent < 0 {
+			return time.Second >> uint(-t.Exponent)
+		}
+		return time.Second << uint(t.Exponent)
+	default:
+		// this might lose precision
+		return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+	}
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have TimestampResolutionNanosecond due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+	// Resolution returns the timestamp resolution of acquired timestamps before scaling to TimestampResolutionNanosecond.
+	Resolution() TimestampResolution
+}
diff --git a/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/google/gopacket/writer.go
new file mode 100644
index 0000000..5d303dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/writer.go
@@ -0,0 +1,232 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using the LayerWriter object.
+type SerializableLayer interface {
+	// SerializeTo writes this layer to a slice, growing that slice if necessary
+	// to make it fit the layer's data.
+	//  Args:
+	//   b:  SerializeBuffer to write this layer on to.  When called, b.Bytes()
+	//     is the payload this layer should wrap, if any.  Note that this
+	//     layer can either prepend itself (common), append itself
+	//     (uncommon), or both (sometimes padding or footers are required at
+	//     the end of packet data). It's also possible (though probably very
+	//     rarely needed) to overwrite any bytes in the current payload.
+	//     After this call, b.Bytes() should return the byte encoding of
+	//     this layer wrapping the original b.Bytes() payload.
+	//   opts:  options to use while writing out data.
+	//  Returns:
+	//   error if a problem was encountered during encoding.  If an error is
+	//   returned, the bytes in data should be considered invalidated, and
+	//   not used.
+	//
+	// SerializeTo calls SHOULD entirely ignore LayerContents and
+	// LayerPayload.  It just serializes based on struct fields, neither
+	// modifying nor using contents/payload.
+	SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+	// LayerType returns the type of the layer that is being serialized to the buffer
+	LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+	// FixLengths determines whether, during serialization, layers should fix
+	// the values for any length field that depends on the payload.
+	FixLengths bool
+	// ComputeChecksums determines whether, during serialization, layers
+	// should recompute checksums based on their payloads.
+	ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte.  Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes.  This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear.  Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+//  1) Reusing a write buffer is generally much faster than creating a new one,
+//     and with the default implementation it avoids additional memory allocations.
+//  2) If a byte slice from a previous Bytes() call will continue to be used,
+//     it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer.  IE: if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
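+//
+// As a small illustration with the default implementation: starting from an
+// empty buffer, AppendBytes(2) followed by PrependBytes(3) leaves Bytes()
+// returning a 5-byte slice whose first 3 bytes are the prepended region and
+// whose last 2 bytes are the appended region.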
+type SerializeBuffer interface {
+	// Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+	// calls.  The slice returned by Bytes will be modified by future Clear calls,
+	// so if you're planning on clearing this SerializeBuffer, you may want to copy
+	// Bytes somewhere safe first.
+	Bytes() []byte
+	// PrependBytes returns a set of bytes which prepends the current bytes in this
+	// buffer.  These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller.  The caller must only call PrependBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	PrependBytes(num int) ([]byte, error)
+	// AppendBytes returns a set of bytes which appends the current bytes in this
+	// buffer.  These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller.  The caller must only call AppendBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	AppendBytes(num int) ([]byte, error)
+	// Clear resets the SerializeBuffer to a new, empty buffer.  After a call to clear,
+	// the byte slice returned by any previous call to Bytes() for this buffer
+	// should be considered invalidated.
+	Clear() error
+	// Layers returns all the Layers that have been successfully serialized into this buffer
+	// already.
+	Layers() []LayerType
+	// PushLayer adds the current Layer to the list of Layers that have been serialized
+	// into this buffer.
+	PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+	data                []byte
+	start               int
+	prepended, appended int
+	layers              []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+	return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended.  This tends to decrease the
+// number of memory allocations made by the buffer during writes.
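+// For example, a writer that always emits Ethernet, IPv4 and TCP headers in
+// front of a payload might use NewSerializeBufferExpectedSize(14+20+20, 0).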
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+	return &serializeBuffer{
+		data:      make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+		start:     expectedPrependLength,
+		prepended: expectedPrependLength,
+		appended:  expectedAppendLength,
+	}
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+	return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	if w.start < num {
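+		// Not enough headroom in front of the payload: grow by at least num
+		// bytes (or by the total prepended so far, whichever is larger) and
+		// shift the existing payload right to make room.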
+		toPrepend := w.prepended
+		if toPrepend < num {
+			toPrepend = num
+		}
+		w.prepended += toPrepend
+		length := cap(w.data) + toPrepend
+		newData := make([]byte, length)
+		newStart := w.start + toPrepend
+		copy(newData[newStart:], w.data[w.start:])
+		w.start = newStart
+		w.data = newData[:toPrepend+len(w.data)]
+	}
+	w.start -= num
+	return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	initialLength := len(w.data)
+	if cap(w.data)-initialLength < num {
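+		// Not enough spare capacity at the end: grow by at least num bytes (or
+		// by the total appended so far, whichever is larger) while keeping the
+		// existing data in place.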
+		toAppend := w.appended
+		if toAppend < num {
+			toAppend = num
+		}
+		w.appended += toAppend
+		newData := make([]byte, cap(w.data)+toAppend)
+		copy(newData[w.start:], w.data[w.start:])
+		w.data = newData[:initialLength]
+	}
+	// Grow the buffer.  We know it'll be under capacity given above.
+	w.data = w.data[:initialLength+num]
+	return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+	w.start = w.prepended
+	w.data = w.data[:w.start]
+	w.layers = w.layers[:0]
+	return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+	return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+	w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other.  Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+//   buf := gopacket.NewSerializeBuffer()
+//   opts := gopacket.SerializeOptions{}
+//   gopacket.SerializeLayers(buf, opts, a, b, c)
+//   firstPayload := buf.Bytes()  // contains byte representation of a(b(c))
+//   gopacket.SerializeLayers(buf, opts, d, e, f)
+//   secondPayload := buf.Bytes()  // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+	w.Clear()
+	for i := len(layers) - 1; i >= 0; i-- {
+		layer := layers[i]
+		err := layer.SerializeTo(w, opts)
+		if err != nil {
+			return err
+		}
+		w.PushLayer(layer.LayerType())
+	}
+	return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+	sls := []SerializableLayer{}
+	for _, layer := range packet.Layers() {
+		sl, ok := layer.(SerializableLayer)
+		if !ok {
+			return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+		}
+		sls = append(sls, sl)
+	}
+	return SerializeLayers(buf, opts, sls...)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
new file mode 100644
index 0000000..3645162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/BUILD.bazel
new file mode 100644
index 0000000..651177f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//:generators"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "parse_req.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/codegenerator",
+    deps = [
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["parse_req_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/doc.go
new file mode 100644
index 0000000..3645317
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/doc.go
@@ -0,0 +1,4 @@
+/*
+Package codegenerator contains reusable functions used by the code generators.
+*/
+package codegenerator
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/parse_req.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/parse_req.go
new file mode 100644
index 0000000..e74575b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/codegenerator/parse_req.go
@@ -0,0 +1,23 @@
+package codegenerator
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// ParseRequest parses a CodeGeneratorRequest from the given reader.
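+// A typical protoc plugin reads the request from standard input, e.g.
+// req, err := codegenerator.ParseRequest(os.Stdin).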
+func ParseRequest(r io.Reader) (*plugin.CodeGeneratorRequest, error) {
+	input, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read code generator request: %v", err)
+	}
+	req := new(plugin.CodeGeneratorRequest)
+	if err = proto.Unmarshal(input, req); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal code generator request: %v", err)
+	}
+	return req, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
new file mode 100644
index 0000000..76cafe6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+proto_library(
+    name = "internal_proto",
+    srcs = ["stream_chunk.proto"],
+    deps = ["@com_google_protobuf//:any_proto"],
+)
+
+go_proto_library(
+    name = "internal_go_proto",
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+    proto = ":internal_proto",
+)
+
+go_library(
+    name = "go_default_library",
+    embed = [":internal_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
new file mode 100644
index 0000000..8858f06
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
@@ -0,0 +1,118 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: internal/stream_chunk.proto
+
+package internal
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// StreamError is a response type which is returned when
+// streaming rpc returns an error.
+type StreamError struct {
+	GrpcCode             int32      `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
+	HttpCode             int32      `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
+	Message              string     `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+	HttpStatus           string     `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
+	Details              []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *StreamError) Reset()         { *m = StreamError{} }
+func (m *StreamError) String() string { return proto.CompactTextString(m) }
+func (*StreamError) ProtoMessage()    {}
+func (*StreamError) Descriptor() ([]byte, []int) {
+	return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
+}
+func (m *StreamError) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StreamError.Unmarshal(m, b)
+}
+func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
+}
+func (dst *StreamError) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StreamError.Merge(dst, src)
+}
+func (m *StreamError) XXX_Size() int {
+	return xxx_messageInfo_StreamError.Size(m)
+}
+func (m *StreamError) XXX_DiscardUnknown() {
+	xxx_messageInfo_StreamError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StreamError proto.InternalMessageInfo
+
+func (m *StreamError) GetGrpcCode() int32 {
+	if m != nil {
+		return m.GrpcCode
+	}
+	return 0
+}
+
+func (m *StreamError) GetHttpCode() int32 {
+	if m != nil {
+		return m.HttpCode
+	}
+	return 0
+}
+
+func (m *StreamError) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+func (m *StreamError) GetHttpStatus() string {
+	if m != nil {
+		return m.HttpStatus
+	}
+	return ""
+}
+
+func (m *StreamError) GetDetails() []*any.Any {
+	if m != nil {
+		return m.Details
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
+}
+
+func init() {
+	proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
+}
+
+var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
+	// 223 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
+	0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
+	0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
+	0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
+	0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
+	0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
+	0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
+	0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
+	0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
+	0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
+	0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
+	0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
+	0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
+	0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
new file mode 100644
index 0000000..55f42ce
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package grpc.gateway.runtime;
+option go_package = "internal";
+
+import "google/protobuf/any.proto";
+
+// StreamError is a response type which is returned when
+// streaming rpc returns an error.
+message StreamError {
+	int32 grpc_code = 1;
+	int32 http_code = 2;
+	string message = 3;
+	string http_status = 4;
+	repeated google.protobuf.Any details = 5;
+}
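To show how this internal message is typically populated, here is a hedged sketch; the error codes, message text, and google.rpc.Status payload are made-up values, the imports shown are assumptions, and an internal package like this can only be imported from within the grpc-gateway module itself.

import (
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/any"
	"github.com/grpc-ecosystem/grpc-gateway/internal"
	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func notFoundStreamError() (*internal.StreamError, error) {
	// Wrap a google.rpc.Status in an Any so it can travel in the Details field.
	detail, err := ptypes.MarshalAny(&spb.Status{Code: 5, Message: "resource not found"})
	if err != nil {
		return nil, err
	}
	return &internal.StreamError{
		GrpcCode:   5,   // codes.NotFound
		HttpCode:   404, // http.StatusNotFound
		Message:    "resource not found",
		HttpStatus: "Not Found",
		Details:    []*any.Any{detail},
	}, nil
}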
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/BUILD.bazel
new file mode 100644
index 0000000..cb772ef
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/BUILD.bazel
@@ -0,0 +1,45 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler")
+
+package(default_visibility = ["//visibility:private"])
+
+go_library(
+    name = "go_default_library",
+    srcs = ["main.go"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
+    deps = [
+        "//codegenerator:go_default_library",
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-grpc-gateway/gengateway:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
+
+go_binary(
+    name = "protoc-gen-grpc-gateway",
+    embed = [":go_default_library"],
+    visibility = ["//visibility:public"],
+)
+
+go_proto_compiler(
+    name = "go_gen_grpc_gateway",
+    options = [
+        "logtostderr=true",
+        "allow_repeated_fields_in_body=true",
+    ],
+    plugin = ":protoc-gen-grpc-gateway",
+    suffix = ".pb.gw.go",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//runtime:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//grpclog:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+        "@org_golang_x_net//context:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/BUILD.bazel
new file mode 100644
index 0000000..cfbdc27
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/BUILD.bazel
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//:generators"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "grpc_api_configuration.go",
+        "grpc_api_service.go",
+        "registry.go",
+        "services.go",
+        "types.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor",
+    deps = [
+        "//protoc-gen-grpc-gateway/httprule:go_default_library",
+        "@com_github_ghodss_yaml//:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
+        "@go_googleapis//google/api:annotations_go_proto",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "grpc_api_configuration_test.go",
+        "registry_test.go",
+        "services_test.go",
+        "types_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//protoc-gen-grpc-gateway/httprule:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_configuration.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_configuration.go
new file mode 100644
index 0000000..ca68ed7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_configuration.go
@@ -0,0 +1,71 @@
+package descriptor
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"github.com/ghodss/yaml"
+	"github.com/golang/protobuf/jsonpb"
+)
+
+func loadGrpcAPIServiceFromYAML(yamlFileContents []byte, yamlSourceLogName string) (*GrpcAPIService, error) {
+	jsonContents, err := yaml.YAMLToJSON(yamlFileContents)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to convert gRPC API Configuration from YAML in '%v' to JSON: %v", yamlSourceLogName, err)
+	}
+
+	// As our GrpcAPIService is incomplete, accept unknown fields.
+	unmarshaler := jsonpb.Unmarshaler{
+		AllowUnknownFields: true,
+	}
+
+	serviceConfiguration := GrpcAPIService{}
+	if err := unmarshaler.Unmarshal(bytes.NewReader(jsonContents), &serviceConfiguration); err != nil {
+		return nil, fmt.Errorf("Failed to parse gRPC API Configuration from YAML in '%v': %v", yamlSourceLogName, err)
+	}
+
+	return &serviceConfiguration, nil
+}
+
+func registerHTTPRulesFromGrpcAPIService(registry *Registry, service *GrpcAPIService, sourceLogName string) error {
+	if service.HTTP == nil {
+		// Nothing to do
+		return nil
+	}
+
+	for _, rule := range service.HTTP.GetRules() {
+		selector := "." + strings.Trim(rule.GetSelector(), " ")
+		if strings.ContainsAny(selector, "*, ") {
+			return fmt.Errorf("Selector '%v' in %v must specify a single service method without wildcards", rule.GetSelector(), sourceLogName)
+		}
+
+		registry.AddExternalHTTPRule(selector, rule)
+	}
+
+	return nil
+}
+
+// LoadGrpcAPIServiceFromYAML loads a gRPC API Configuration from the given YAML file
+// and registers the HttpRule descriptions contained in it as externalHTTPRules in
+// the given registry. This must be done before loading the proto file.
+//
+// You can learn more about gRPC API Service descriptions from google's documentation
+// at https://cloud.google.com/endpoints/docs/grpc/grpc-service-config
+//
+// Note that for the purposes of the gateway generator we only consider a subset of all
+// available features google supports in their service descriptions.
+func (r *Registry) LoadGrpcAPIServiceFromYAML(yamlFile string) error {
+	yamlFileContents, err := ioutil.ReadFile(yamlFile)
+	if err != nil {
+		return fmt.Errorf("Failed to read gRPC API Configuration description from '%v': %v", yamlFile, err)
+	}
+
+	service, err := loadGrpcAPIServiceFromYAML(yamlFileContents, yamlFile)
+	if err != nil {
+		return err
+	}
+
+	return registerHTTPRulesFromGrpcAPIService(r, service, yamlFile)
+}
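A minimal sketch of how a generator might consume this API, assuming a hypothetical config file named echo_service.yaml with an invented service and route; the selector and path are not taken from the vendored code.

// echo_service.yaml (hypothetical):
//
//   http:
//     rules:
//     - selector: example.v1.EchoService.Echo
//       post: /v1/example/echo
//       body: "*"
reg := descriptor.NewRegistry()
if err := reg.LoadGrpcAPIServiceFromYAML("echo_service.yaml"); err != nil {
	log.Fatal(err)
}
// After the corresponding proto files are loaded, the rule is available via
// reg.LookupExternalHTTPRules(".example.v1.EchoService.Echo").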
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_service.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_service.go
new file mode 100644
index 0000000..75b8240
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/grpc_api_service.go
@@ -0,0 +1,31 @@
+package descriptor
+
+import (
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/genproto/googleapis/api/annotations"
+)
+
+// GrpcAPIService represents a stripped down version of google.api.Service .
+// Compare to https://github.com/googleapis/googleapis/blob/master/google/api/service.proto
+// The original imports 23 other protobuf files we are not interested in. If a significant
+// subset (>50%) of these start being reproduced in this file we should swap to using the
+// full generated version instead.
+//
+// For the purposes of the gateway generator we only consider a small subset of all
+// available features google supports in their service descriptions. Thanks to backwards
+// compatibility guarantees by protobuf it is safe for us to remove the other fields.
+// We also only implement the absolute minimum of protobuf generator boilerplate to use
+// our simplified version. These should be pretty stable too.
+type GrpcAPIService struct {
+	// Http Rule. Named Http in the actual proto. Changed to suppress linter warning.
+	HTTP *annotations.Http `protobuf:"bytes,9,opt,name=http" json:"http,omitempty"`
+}
+
+// ProtoMessage returns an empty GrpcAPIService element
+func (*GrpcAPIService) ProtoMessage() {}
+
+// Reset resets the GrpcAPIService
+func (m *GrpcAPIService) Reset() { *m = GrpcAPIService{} }
+
+// String returns the string representation of the GrpcAPIService
+func (m *GrpcAPIService) String() string { return proto.CompactTextString(m) }
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go
new file mode 100644
index 0000000..2f05636
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go
@@ -0,0 +1,498 @@
+package descriptor
+
+import (
+	"fmt"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/golang/glog"
+	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"google.golang.org/genproto/googleapis/api/annotations"
+)
+
+// Registry is a registry of information extracted from plugin.CodeGeneratorRequest.
+type Registry struct {
+	// msgs is a mapping from fully-qualified message name to descriptor
+	msgs map[string]*Message
+
+	// enums is a mapping from fully-qualified enum name to descriptor
+	enums map[string]*Enum
+
+	// files is a mapping from file path to descriptor
+	files map[string]*File
+
+	// prefix is a prefix to be inserted to golang package paths generated from proto package names.
+	prefix string
+
+	// importPath is used as the package if no input files declare go_package. If it contains slashes, everything up to the rightmost slash is ignored.
+	importPath string
+
+	// pkgMap is a user-specified mapping from file path to proto package.
+	pkgMap map[string]string
+
+	// pkgAliases is a mapping from package aliases to package paths in go which are already taken.
+	pkgAliases map[string]string
+
+	// allowDeleteBody permits http delete methods to have a body
+	allowDeleteBody bool
+
+	// externalHTTPRules is a mapping from fully qualified service method names to additional HttpRules applicable besides the ones found in annotations.
+	externalHTTPRules map[string][]*annotations.HttpRule
+
+	// allowMerge generates one swagger file out of multiple protos
+	allowMerge bool
+
+	// mergeFileName is the target swagger file name after the merge
+	mergeFileName string
+
+	// allowRepeatedFieldsInBody permits repeated field in body field path of `google.api.http` annotation option
+	allowRepeatedFieldsInBody bool
+
+	// includePackageInTags controls whether the package name defined in the `package` directive
+	// in the proto file can be prepended to the gRPC service name in the `Tags` field of every operation.
+	includePackageInTags bool
+
+	// repeatedPathParamSeparator specifies how path parameter repeated fields are separated
+	repeatedPathParamSeparator repeatedFieldSeparator
+
+	// useJSONNamesForFields if true json tag name is used for generating fields in swagger definitions,
+	// otherwise the original proto name is used. It's helpful for synchronizing the swagger definition
+	// with grpc-gateway response, if it uses json tags for marshaling.
+	useJSONNamesForFields bool
+
+	// useFQNForSwaggerName if true swagger names will use the full qualified name (FQN) from proto definition,
+	// and generate a dot-separated swagger name concatenating all elements from the proto FQN.
+	// If false, the default behavior is to concat the last 2 elements of the FQN if they are unique, otherwise concat
+	// all the elements of the FQN without any separator
+	useFQNForSwaggerName bool
+
+	// allowColonFinalSegments determines whether colons are permitted
+	// in the final segment of a path.
+	allowColonFinalSegments bool
+}
+
+type repeatedFieldSeparator struct {
+	name string
+	sep  rune
+}
+
+// NewRegistry returns a new Registry.
+func NewRegistry() *Registry {
+	return &Registry{
+		msgs:              make(map[string]*Message),
+		enums:             make(map[string]*Enum),
+		files:             make(map[string]*File),
+		pkgMap:            make(map[string]string),
+		pkgAliases:        make(map[string]string),
+		externalHTTPRules: make(map[string][]*annotations.HttpRule),
+		repeatedPathParamSeparator: repeatedFieldSeparator{
+			name: "csv",
+			sep:  ',',
+		},
+	}
+}
+
+// Load loads definitions of services, methods, messages, enumerations and fields from "req".
+func (r *Registry) Load(req *plugin.CodeGeneratorRequest) error {
+	for _, file := range req.GetProtoFile() {
+		r.loadFile(file)
+	}
+
+	var targetPkg string
+	for _, name := range req.FileToGenerate {
+		target := r.files[name]
+		if target == nil {
+			return fmt.Errorf("no such file: %s", name)
+		}
+		name := r.packageIdentityName(target.FileDescriptorProto)
+		if targetPkg == "" {
+			targetPkg = name
+		} else {
+			if targetPkg != name {
+				return fmt.Errorf("inconsistent package names: %s %s", targetPkg, name)
+			}
+		}
+
+		if err := r.loadServices(target); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// loadFile loads messages, enumerations and fields from "file".
+// It does not load services and methods in "file". You need to call
+// loadServices after loadFile is called for all files to load services and methods.
+func (r *Registry) loadFile(file *descriptor.FileDescriptorProto) {
+	pkg := GoPackage{
+		Path: r.goPackagePath(file),
+		Name: r.defaultGoPackageName(file),
+	}
+	if err := r.ReserveGoPackageAlias(pkg.Name, pkg.Path); err != nil {
+		for i := 0; ; i++ {
+			alias := fmt.Sprintf("%s_%d", pkg.Name, i)
+			if err := r.ReserveGoPackageAlias(alias, pkg.Path); err == nil {
+				pkg.Alias = alias
+				break
+			}
+		}
+	}
+	f := &File{
+		FileDescriptorProto: file,
+		GoPkg:               pkg,
+	}
+
+	r.files[file.GetName()] = f
+	r.registerMsg(f, nil, file.GetMessageType())
+	r.registerEnum(f, nil, file.GetEnumType())
+}
+
+func (r *Registry) registerMsg(file *File, outerPath []string, msgs []*descriptor.DescriptorProto) {
+	for i, md := range msgs {
+		m := &Message{
+			File:            file,
+			Outers:          outerPath,
+			DescriptorProto: md,
+			Index:           i,
+		}
+		for _, fd := range md.GetField() {
+			m.Fields = append(m.Fields, &Field{
+				Message:              m,
+				FieldDescriptorProto: fd,
+			})
+		}
+		file.Messages = append(file.Messages, m)
+		r.msgs[m.FQMN()] = m
+		glog.V(1).Infof("register name: %s", m.FQMN())
+
+		var outers []string
+		outers = append(outers, outerPath...)
+		outers = append(outers, m.GetName())
+		r.registerMsg(file, outers, m.GetNestedType())
+		r.registerEnum(file, outers, m.GetEnumType())
+	}
+}
+
+func (r *Registry) registerEnum(file *File, outerPath []string, enums []*descriptor.EnumDescriptorProto) {
+	for i, ed := range enums {
+		e := &Enum{
+			File:                file,
+			Outers:              outerPath,
+			EnumDescriptorProto: ed,
+			Index:               i,
+		}
+		file.Enums = append(file.Enums, e)
+		r.enums[e.FQEN()] = e
+		glog.V(1).Infof("register enum name: %s", e.FQEN())
+	}
+}
+
+// LookupMsg looks up a message type by "name".
+// It tries to resolve "name" from "location" if "name" is a relative message name.
+func (r *Registry) LookupMsg(location, name string) (*Message, error) {
+	glog.V(1).Infof("lookup %s from %s", name, location)
+	if strings.HasPrefix(name, ".") {
+		m, ok := r.msgs[name]
+		if !ok {
+			return nil, fmt.Errorf("no message found: %s", name)
+		}
+		return m, nil
+	}
+
+	if !strings.HasPrefix(location, ".") {
+		location = fmt.Sprintf(".%s", location)
+	}
+	components := strings.Split(location, ".")
+	for len(components) > 0 {
+		fqmn := strings.Join(append(components, name), ".")
+		if m, ok := r.msgs[fqmn]; ok {
+			return m, nil
+		}
+		components = components[:len(components)-1]
+	}
+	return nil, fmt.Errorf("no message found: %s", name)
+}
+
+// LookupEnum looks up an enum type by "name".
+// It tries to resolve "name" from "location" if "name" is a relative enum name.
+func (r *Registry) LookupEnum(location, name string) (*Enum, error) {
+	glog.V(1).Infof("lookup enum %s from %s", name, location)
+	if strings.HasPrefix(name, ".") {
+		e, ok := r.enums[name]
+		if !ok {
+			return nil, fmt.Errorf("no enum found: %s", name)
+		}
+		return e, nil
+	}
+
+	if !strings.HasPrefix(location, ".") {
+		location = fmt.Sprintf(".%s", location)
+	}
+	components := strings.Split(location, ".")
+	for len(components) > 0 {
+		fqen := strings.Join(append(components, name), ".")
+		if e, ok := r.enums[fqen]; ok {
+			return e, nil
+		}
+		components = components[:len(components)-1]
+	}
+	return nil, fmt.Errorf("no enum found: %s", name)
+}
+
+// LookupFile looks up a file by name.
+func (r *Registry) LookupFile(name string) (*File, error) {
+	f, ok := r.files[name]
+	if !ok {
+		return nil, fmt.Errorf("no such file given: %s", name)
+	}
+	return f, nil
+}
+
+// LookupExternalHTTPRules looks up external http rules by fully qualified service method name
+func (r *Registry) LookupExternalHTTPRules(qualifiedMethodName string) []*annotations.HttpRule {
+	return r.externalHTTPRules[qualifiedMethodName]
+}
+
+// AddExternalHTTPRule adds an external http rule for the given fully qualified service method name
+func (r *Registry) AddExternalHTTPRule(qualifiedMethodName string, rule *annotations.HttpRule) {
+	r.externalHTTPRules[qualifiedMethodName] = append(r.externalHTTPRules[qualifiedMethodName], rule)
+}
+
+// AddPkgMap adds a mapping from a .proto file to proto package name.
+func (r *Registry) AddPkgMap(file, protoPkg string) {
+	r.pkgMap[file] = protoPkg
+}
+
+// SetPrefix registers the prefix to be added to go package paths generated from proto package names.
+func (r *Registry) SetPrefix(prefix string) {
+	r.prefix = prefix
+}
+
+// SetImportPath registers the importPath which is used as the package if no
+// input files declare go_package. If it contains slashes, everything up to the
+// rightmost slash is ignored.
+func (r *Registry) SetImportPath(importPath string) {
+	r.importPath = importPath
+}
+
+// ReserveGoPackageAlias reserves a unique alias for a go package.
+// If it succeeds, the alias will never be used for other packages in generated go files.
+// If it fails, the alias is already taken by another package, so you need to use another
+// alias for the package in your go files.
+func (r *Registry) ReserveGoPackageAlias(alias, pkgpath string) error {
+	if taken, ok := r.pkgAliases[alias]; ok {
+		if taken == pkgpath {
+			return nil
+		}
+		return fmt.Errorf("package name %s is already taken. Use another alias", alias)
+	}
+	r.pkgAliases[alias] = pkgpath
+	return nil
+}
+
+// goPackagePath returns the go package path which go files generated from "f" should have.
+// It respects the mapping registered by AddPkgMap if one exists, or uses go_package as the
+// import path if it includes a slash. Otherwise, it generates a path from the file name of "f".
+func (r *Registry) goPackagePath(f *descriptor.FileDescriptorProto) string {
+	name := f.GetName()
+	if pkg, ok := r.pkgMap[name]; ok {
+		return path.Join(r.prefix, pkg)
+	}
+
+	gopkg := f.Options.GetGoPackage()
+	idx := strings.LastIndex(gopkg, "/")
+	if idx >= 0 {
+		if sc := strings.LastIndex(gopkg, ";"); sc > 0 {
+			gopkg = gopkg[:sc+1-1]
+		}
+		return gopkg
+	}
+
+	return path.Join(r.prefix, path.Dir(name))
+}
+
+// GetAllFQMNs returns a list of all FQMNs
+func (r *Registry) GetAllFQMNs() []string {
+	var keys []string
+	for k := range r.msgs {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// GetAllFQENs returns a list of all FQENs
+func (r *Registry) GetAllFQENs() []string {
+	var keys []string
+	for k := range r.enums {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// SetAllowDeleteBody controls whether http delete methods may have a
+// body or fail loading if encountered.
+func (r *Registry) SetAllowDeleteBody(allow bool) {
+	r.allowDeleteBody = allow
+}
+
+// SetAllowMerge controls whether to generate one swagger file out of multiple protos
+func (r *Registry) SetAllowMerge(allow bool) {
+	r.allowMerge = allow
+}
+
+// IsAllowMerge returns whether one swagger file is generated out of multiple protos
+func (r *Registry) IsAllowMerge() bool {
+	return r.allowMerge
+}
+
+// SetMergeFileName controls the target swagger file name out of multiple protos
+func (r *Registry) SetMergeFileName(mergeFileName string) {
+	r.mergeFileName = mergeFileName
+}
+
+// SetAllowRepeatedFieldsInBody controls whether repeated field can be used
+// in `body` and `response_body` (`google.api.http` annotation option) field path or not
+func (r *Registry) SetAllowRepeatedFieldsInBody(allow bool) {
+	r.allowRepeatedFieldsInBody = allow
+}
+
+// IsAllowRepeatedFieldsInBody checks if repeated field can be used
+// in `body` and `response_body` (`google.api.http` annotation option) field path or not
+func (r *Registry) IsAllowRepeatedFieldsInBody() bool {
+	return r.allowRepeatedFieldsInBody
+}
+
+// SetIncludePackageInTags controls whether the package name defined in the `package` directive
+// in the proto file can be prepended to the gRPC service name in the `Tags` field of every operation.
+func (r *Registry) SetIncludePackageInTags(allow bool) {
+	r.includePackageInTags = allow
+}
+
+// IsIncludePackageInTags checks whether the package name defined in the `package` directive
+// in the proto file can be prepended to the gRPC service name in the `Tags` field of every operation.
+func (r *Registry) IsIncludePackageInTags() bool {
+	return r.includePackageInTags
+}
+
+// GetRepeatedPathParamSeparator returns a rune specifying how
+// path parameter repeated fields are separated.
+func (r *Registry) GetRepeatedPathParamSeparator() rune {
+	return r.repeatedPathParamSeparator.sep
+}
+
+// GetRepeatedPathParamSeparatorName returns the name of the separator used for repeated
+// path parameter fields, i.e. 'csv', 'pipes', 'ssv' or 'tsv'.
+func (r *Registry) GetRepeatedPathParamSeparatorName() string {
+	return r.repeatedPathParamSeparator.name
+}
+
+// SetRepeatedPathParamSeparator sets how path parameter repeated fields are
+// separated. Allowed names are 'csv', 'pipes', 'ssv' and 'tsv'.
+func (r *Registry) SetRepeatedPathParamSeparator(name string) error {
+	var sep rune
+	switch name {
+	case "csv":
+		sep = ','
+	case "pipes":
+		sep = '|'
+	case "ssv":
+		sep = ' '
+	case "tsv":
+		sep = '\t'
+	default:
+		return fmt.Errorf("unknown repeated path parameter separator: %s", name)
+	}
+	r.repeatedPathParamSeparator = repeatedFieldSeparator{
+		name: name,
+		sep:  sep,
+	}
+	return nil
+}
+
+// SetUseJSONNamesForFields sets useJSONNamesForFields
+func (r *Registry) SetUseJSONNamesForFields(use bool) {
+	r.useJSONNamesForFields = use
+}
+
+// GetUseJSONNamesForFields returns useJSONNamesForFields
+func (r *Registry) GetUseJSONNamesForFields() bool {
+	return r.useJSONNamesForFields
+}
+
+// SetUseFQNForSwaggerName sets useFQNForSwaggerName
+func (r *Registry) SetUseFQNForSwaggerName(use bool) {
+	r.useFQNForSwaggerName = use
+}
+
+// GetAllowColonFinalSegments returns allowColonFinalSegments
+func (r *Registry) GetAllowColonFinalSegments() bool {
+	return r.allowColonFinalSegments
+}
+
+// SetAllowColonFinalSegments sets allowColonFinalSegments
+func (r *Registry) SetAllowColonFinalSegments(use bool) {
+	r.allowColonFinalSegments = use
+}
+
+// GetUseFQNForSwaggerName returns useFQNForSwaggerName
+func (r *Registry) GetUseFQNForSwaggerName() bool {
+	return r.useFQNForSwaggerName
+}
+
+// GetMergeFileName return the target merge swagger file name
+func (r *Registry) GetMergeFileName() string {
+	return r.mergeFileName
+}
+
+// sanitizePackageName replaces disallowed characters in the package name
+// with allowed characters.
+func sanitizePackageName(pkgName string) string {
+	pkgName = strings.Replace(pkgName, ".", "_", -1)
+	pkgName = strings.Replace(pkgName, "-", "_", -1)
+	return pkgName
+}
+
+// defaultGoPackageName returns the default go package name to be used for go files generated from "f".
+// You might need to use a unique alias for the package when you import it. Use ReserveGoPackageAlias to get a unique alias.
+func (r *Registry) defaultGoPackageName(f *descriptor.FileDescriptorProto) string {
+	name := r.packageIdentityName(f)
+	return sanitizePackageName(name)
+}
+
+// packageIdentityName returns the identity of packages.
+// protoc-gen-grpc-gateway rejects CodeGeneratorRequests which contain more than one package
+// as protoc-gen-go does.
+func (r *Registry) packageIdentityName(f *descriptor.FileDescriptorProto) string {
+	if f.Options != nil && f.Options.GoPackage != nil {
+		gopkg := f.Options.GetGoPackage()
+		idx := strings.LastIndex(gopkg, "/")
+		if idx < 0 {
+			gopkg = gopkg[idx+1:]
+		}
+
+		gopkg = gopkg[idx+1:]
+		// package name is overridden with the string after the
+		// ';' character
+		sc := strings.IndexByte(gopkg, ';')
+		if sc < 0 {
+			return sanitizePackageName(gopkg)
+
+		}
+		return sanitizePackageName(gopkg[sc+1:])
+	}
+	if p := r.importPath; len(p) != 0 {
+		if i := strings.LastIndex(p, "/"); i >= 0 {
+			p = p[i+1:]
+		}
+		return p
+	}
+
+	if f.Package == nil {
+		base := filepath.Base(f.GetName())
+		ext := filepath.Ext(base)
+		return strings.TrimSuffix(base, ext)
+	}
+	return f.GetPackage()
+}
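Putting the registry API together, a hedged sketch of a plugin configuring and loading it; the prefix value is invented, and req is assumed to be a *plugin.CodeGeneratorRequest obtained earlier (for example from codegenerator.ParseRequest).

reg := descriptor.NewRegistry()
reg.SetPrefix("github.com/example/gen") // hypothetical prefix for generated go package paths
reg.SetAllowRepeatedFieldsInBody(true)
if err := reg.SetRepeatedPathParamSeparator("csv"); err != nil {
	log.Fatal(err)
}
if err := reg.Load(req); err != nil {
	log.Fatal(err)
}
for _, name := range req.FileToGenerate {
	f, err := reg.LookupFile(name)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("target %s -> go package %s", name, f.GoPkg.Name)
}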
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go
new file mode 100644
index 0000000..8916d31
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go
@@ -0,0 +1,304 @@
+package descriptor
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/golang/protobuf/proto"
+	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule"
+	options "google.golang.org/genproto/googleapis/api/annotations"
+)
+
+// loadServices registers services and their methods from "targetFile" to "r".
+// It must be called after loadFile is called for all files so that loadServices
+// can resolve names of message types and their fields.
+func (r *Registry) loadServices(file *File) error {
+	glog.V(1).Infof("Loading services from %s", file.GetName())
+	var svcs []*Service
+	for _, sd := range file.GetService() {
+		glog.V(2).Infof("Registering %s", sd.GetName())
+		svc := &Service{
+			File: file,
+			ServiceDescriptorProto: sd,
+		}
+		for _, md := range sd.GetMethod() {
+			glog.V(2).Infof("Processing %s.%s", sd.GetName(), md.GetName())
+			opts, err := extractAPIOptions(md)
+			if err != nil {
+				glog.Errorf("Failed to extract HttpRule from %s.%s: %v", svc.GetName(), md.GetName(), err)
+				return err
+			}
+			optsList := r.LookupExternalHTTPRules((&Method{Service: svc, MethodDescriptorProto: md}).FQMN())
+			if opts != nil {
+				optsList = append(optsList, opts)
+			}
+			if len(optsList) == 0 {
+				glog.V(1).Infof("Found non-target method: %s.%s", svc.GetName(), md.GetName())
+			}
+			meth, err := r.newMethod(svc, md, optsList)
+			if err != nil {
+				return err
+			}
+			svc.Methods = append(svc.Methods, meth)
+		}
+		if len(svc.Methods) == 0 {
+			continue
+		}
+		glog.V(2).Infof("Registered %s with %d method(s)", svc.GetName(), len(svc.Methods))
+		svcs = append(svcs, svc)
+	}
+	file.Services = svcs
+	return nil
+}
+
+func (r *Registry) newMethod(svc *Service, md *descriptor.MethodDescriptorProto, optsList []*options.HttpRule) (*Method, error) {
+	requestType, err := r.LookupMsg(svc.File.GetPackage(), md.GetInputType())
+	if err != nil {
+		return nil, err
+	}
+	responseType, err := r.LookupMsg(svc.File.GetPackage(), md.GetOutputType())
+	if err != nil {
+		return nil, err
+	}
+	meth := &Method{
+		Service:               svc,
+		MethodDescriptorProto: md,
+		RequestType:           requestType,
+		ResponseType:          responseType,
+	}
+
+	newBinding := func(opts *options.HttpRule, idx int) (*Binding, error) {
+		var (
+			httpMethod   string
+			pathTemplate string
+		)
+		switch {
+		case opts.GetGet() != "":
+			httpMethod = "GET"
+			pathTemplate = opts.GetGet()
+			if opts.Body != "" {
+				return nil, fmt.Errorf("must not set request body when http method is GET: %s", md.GetName())
+			}
+
+		case opts.GetPut() != "":
+			httpMethod = "PUT"
+			pathTemplate = opts.GetPut()
+
+		case opts.GetPost() != "":
+			httpMethod = "POST"
+			pathTemplate = opts.GetPost()
+
+		case opts.GetDelete() != "":
+			httpMethod = "DELETE"
+			pathTemplate = opts.GetDelete()
+			if opts.Body != "" && !r.allowDeleteBody {
+				return nil, fmt.Errorf("must not set request body when http method is DELETE except allow_delete_body option is true: %s", md.GetName())
+			}
+
+		case opts.GetPatch() != "":
+			httpMethod = "PATCH"
+			pathTemplate = opts.GetPatch()
+
+		case opts.GetCustom() != nil:
+			custom := opts.GetCustom()
+			httpMethod = custom.Kind
+			pathTemplate = custom.Path
+
+		default:
+			glog.V(1).Infof("No pattern specified in google.api.HttpRule: %s", md.GetName())
+			return nil, nil
+		}
+
+		parsed, err := httprule.Parse(pathTemplate)
+		if err != nil {
+			return nil, err
+		}
+		tmpl := parsed.Compile()
+
+		if md.GetClientStreaming() && len(tmpl.Fields) > 0 {
+			return nil, fmt.Errorf("cannot use path parameter in client streaming")
+		}
+
+		b := &Binding{
+			Method:     meth,
+			Index:      idx,
+			PathTmpl:   tmpl,
+			HTTPMethod: httpMethod,
+		}
+
+		for _, f := range tmpl.Fields {
+			param, err := r.newParam(meth, f)
+			if err != nil {
+				return nil, err
+			}
+			b.PathParams = append(b.PathParams, param)
+		}
+
+		// TODO(yugui) Handle query params
+
+		b.Body, err = r.newBody(meth, opts.Body)
+		if err != nil {
+			return nil, err
+		}
+
+		b.ResponseBody, err = r.newResponse(meth, opts.ResponseBody)
+		if err != nil {
+			return nil, err
+		}
+
+		return b, nil
+	}
+
+	applyOpts := func(opts *options.HttpRule) error {
+		b, err := newBinding(opts, len(meth.Bindings))
+		if err != nil {
+			return err
+		}
+
+		if b != nil {
+			meth.Bindings = append(meth.Bindings, b)
+		}
+		for _, additional := range opts.GetAdditionalBindings() {
+			if len(additional.AdditionalBindings) > 0 {
+				return fmt.Errorf("additional_binding in additional_binding not allowed: %s.%s", svc.GetName(), meth.GetName())
+			}
+			b, err := newBinding(additional, len(meth.Bindings))
+			if err != nil {
+				return err
+			}
+			meth.Bindings = append(meth.Bindings, b)
+		}
+
+		return nil
+	}
+
+	for _, opts := range optsList {
+		if err := applyOpts(opts); err != nil {
+			return nil, err
+		}
+	}
+
+	return meth, nil
+}
+
+func extractAPIOptions(meth *descriptor.MethodDescriptorProto) (*options.HttpRule, error) {
+	if meth.Options == nil {
+		return nil, nil
+	}
+	if !proto.HasExtension(meth.Options, options.E_Http) {
+		return nil, nil
+	}
+	ext, err := proto.GetExtension(meth.Options, options.E_Http)
+	if err != nil {
+		return nil, err
+	}
+	opts, ok := ext.(*options.HttpRule)
+	if !ok {
+		return nil, fmt.Errorf("extension is %T; want an HttpRule", ext)
+	}
+	return opts, nil
+}
+
+func (r *Registry) newParam(meth *Method, path string) (Parameter, error) {
+	msg := meth.RequestType
+	fields, err := r.resolveFieldPath(msg, path, true)
+	if err != nil {
+		return Parameter{}, err
+	}
+	l := len(fields)
+	if l == 0 {
+		return Parameter{}, fmt.Errorf("invalid field access list for %s", path)
+	}
+	target := fields[l-1].Target
+	switch target.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_GROUP:
+		glog.V(2).Infoln("found aggregate type:", target, target.TypeName)
+		if IsWellKnownType(*target.TypeName) {
+			glog.V(2).Infoln("found well known aggregate type:", target)
+		} else {
+			return Parameter{}, fmt.Errorf("aggregate type %s in parameter of %s.%s: %s", target.Type, meth.Service.GetName(), meth.GetName(), path)
+		}
+	}
+	return Parameter{
+		FieldPath: FieldPath(fields),
+		Method:    meth,
+		Target:    fields[l-1].Target,
+	}, nil
+}
+
+func (r *Registry) newBody(meth *Method, path string) (*Body, error) {
+	msg := meth.RequestType
+	switch path {
+	case "":
+		return nil, nil
+	case "*":
+		return &Body{FieldPath: nil}, nil
+	}
+	fields, err := r.resolveFieldPath(msg, path, false)
+	if err != nil {
+		return nil, err
+	}
+	return &Body{FieldPath: FieldPath(fields)}, nil
+}
+
+func (r *Registry) newResponse(meth *Method, path string) (*Body, error) {
+	msg := meth.ResponseType
+	switch path {
+	case "", "*":
+		return nil, nil
+	}
+	fields, err := r.resolveFieldPath(msg, path, false)
+	if err != nil {
+		return nil, err
+	}
+	return &Body{FieldPath: FieldPath(fields)}, nil
+}
+
+// lookupField looks up a field named "name" within "msg".
+// It returns nil if no such field found.
+func lookupField(msg *Message, name string) *Field {
+	for _, f := range msg.Fields {
+		if f.GetName() == name {
+			return f
+		}
+	}
+	return nil
+}
+
+// resolveFieldPath resolves "path" into a list of fieldDescriptor, starting from "msg".
+func (r *Registry) resolveFieldPath(msg *Message, path string, isPathParam bool) ([]FieldPathComponent, error) {
+	if path == "" {
+		return nil, nil
+	}
+
+	root := msg
+	var result []FieldPathComponent
+	for i, c := range strings.Split(path, ".") {
+		if i > 0 {
+			f := result[i-1].Target
+			switch f.GetType() {
+			case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_GROUP:
+				var err error
+				msg, err = r.LookupMsg(msg.FQMN(), f.GetTypeName())
+				if err != nil {
+					return nil, err
+				}
+			default:
+				return nil, fmt.Errorf("not an aggregate type: %s in %s", f.GetName(), path)
+			}
+		}
+
+		glog.V(2).Infof("Lookup %s in %s", c, msg.FQMN())
+		f := lookupField(msg, c)
+		if f == nil {
+			return nil, fmt.Errorf("no field %q found in %s", path, root.GetName())
+		}
+		if !(isPathParam || r.allowRepeatedFieldsInBody) && f.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED {
+			return nil, fmt.Errorf("repeated field not allowed in field path: %s in %s", f.GetName(), path)
+		}
+		result = append(result, FieldPathComponent{Name: c, Target: f})
+	}
+	return result, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/types.go
new file mode 100644
index 0000000..4aa75f8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/types.go
@@ -0,0 +1,466 @@
+package descriptor
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	gogen "github.com/golang/protobuf/protoc-gen-go/generator"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule"
+)
+
+// IsWellKnownType returns true if the provided fully qualified type name is considered 'well-known'.
+func IsWellKnownType(typeName string) bool {
+	_, ok := wellKnownTypeConv[typeName]
+	return ok
+}
+
+// GoPackage represents a golang package
+type GoPackage struct {
+	// Path is the package path to the package.
+	Path string
+	// Name is the package name of the package
+	Name string
+	// Alias is an alias of the package unique within the current invocation of the grpc-gateway generator.
+	Alias string
+}
+
+// Standard returns whether the import is a golang standard package.
+func (p GoPackage) Standard() bool {
+	return !strings.Contains(p.Path, ".")
+}
+
+// String returns a string representation of this package in the form of import line in golang.
+func (p GoPackage) String() string {
+	if p.Alias == "" {
+		return fmt.Sprintf("%q", p.Path)
+	}
+	return fmt.Sprintf("%s %q", p.Alias, p.Path)
+}
+
+// File wraps descriptor.FileDescriptorProto for richer features.
+type File struct {
+	*descriptor.FileDescriptorProto
+	// GoPkg is the go package of the go file generated from this file.
+	GoPkg GoPackage
+	// Messages is the list of messages defined in this file.
+	Messages []*Message
+	// Enums is the list of enums defined in this file.
+	Enums []*Enum
+	// Services is the list of services defined in this file.
+	Services []*Service
+}
+
+// proto2 determines if the syntax of the file is proto2.
+func (f *File) proto2() bool {
+	return f.Syntax == nil || f.GetSyntax() == "proto2"
+}
+
+// Message describes a protocol buffer message type
+type Message struct {
+	// File is the file where the message is defined
+	File *File
+	// Outers is a list of outer messages if this message is a nested type.
+	Outers []string
+	*descriptor.DescriptorProto
+	Fields []*Field
+
+	// Index is proto path index of this message in File.
+	Index int
+}
+
+// FQMN returns a fully qualified message name of this message.
+func (m *Message) FQMN() string {
+	components := []string{""}
+	if m.File.Package != nil {
+		components = append(components, m.File.GetPackage())
+	}
+	components = append(components, m.Outers...)
+	components = append(components, m.GetName())
+	return strings.Join(components, ".")
+}
+
+// GoType returns a go type name for the message type.
+// It prefixes the type name with the package alias if
+// the package it belongs to is not "currentPackage".
+func (m *Message) GoType(currentPackage string) string {
+	var components []string
+	components = append(components, m.Outers...)
+	components = append(components, m.GetName())
+
+	name := strings.Join(components, "_")
+	if m.File.GoPkg.Path == currentPackage {
+		return name
+	}
+	pkg := m.File.GoPkg.Name
+	if alias := m.File.GoPkg.Alias; alias != "" {
+		pkg = alias
+	}
+	return fmt.Sprintf("%s.%s", pkg, name)
+}
+
+// Enum describes a protocol buffer enum type
+type Enum struct {
+	// File is the file where the enum is defined
+	File *File
+	// Outers is a list of outer messages if this enum is a nested type.
+	Outers []string
+	*descriptor.EnumDescriptorProto
+
+	Index int
+}
+
+// FQEN returns a fully qualified enum name of this enum.
+func (e *Enum) FQEN() string {
+	components := []string{""}
+	if e.File.Package != nil {
+		components = append(components, e.File.GetPackage())
+	}
+	components = append(components, e.Outers...)
+	components = append(components, e.GetName())
+	return strings.Join(components, ".")
+}
+
+// GoType returns a go type name for the enum type.
+// It prefixes the type name with the package alias if
+// the package it belongs to is not "currentPackage".
+func (e *Enum) GoType(currentPackage string) string {
+	var components []string
+	components = append(components, e.Outers...)
+	components = append(components, e.GetName())
+
+	name := strings.Join(components, "_")
+	if e.File.GoPkg.Path == currentPackage {
+		return name
+	}
+	pkg := e.File.GoPkg.Name
+	if alias := e.File.GoPkg.Alias; alias != "" {
+		pkg = alias
+	}
+	return fmt.Sprintf("%s.%s", pkg, name)
+}
+
+// Service wraps descriptor.ServiceDescriptorProto for richer features.
+type Service struct {
+	// File is the file where this service is defined.
+	File *File
+	*descriptor.ServiceDescriptorProto
+	// Methods is the list of methods defined in this service.
+	Methods []*Method
+}
+
+// FQSN returns the fully qualified service name of this service.
+func (s *Service) FQSN() string {
+	components := []string{""}
+	if s.File.Package != nil {
+		components = append(components, s.File.GetPackage())
+	}
+	components = append(components, s.GetName())
+	return strings.Join(components, ".")
+}
+
+// Method wraps descriptor.MethodDescriptorProto for richer features.
+type Method struct {
+	// Service is the service which this method belongs to.
+	Service *Service
+	*descriptor.MethodDescriptorProto
+
+	// RequestType is the message type of requests to this method.
+	RequestType *Message
+	// ResponseType is the message type of responses from this method.
+	ResponseType *Message
+	Bindings     []*Binding
+}
+
+// FQMN returns a fully qualified rpc method name of this method.
+func (m *Method) FQMN() string {
+	components := []string{}
+	components = append(components, m.Service.FQSN())
+	components = append(components, m.GetName())
+	return strings.Join(components, ".")
+}
+
+// Binding describes how an HTTP endpoint is bound to a gRPC method.
+type Binding struct {
+	// Method is the method which the endpoint is bound to.
+	Method *Method
+	// Index is a zero-origin index of the binding in the target method
+	Index int
+	// PathTmpl is path template where this method is mapped to.
+	PathTmpl httprule.Template
+	// HTTPMethod is the HTTP method which this method is mapped to.
+	HTTPMethod string
+	// PathParams is the list of parameters provided in HTTP request paths.
+	PathParams []Parameter
+	// Body describes parameters provided in HTTP request body.
+	Body *Body
+	// ResponseBody describes field in response struct to marshal in HTTP response body.
+	ResponseBody *Body
+}
+
+// ExplicitParams returns a list of explicitly bound parameters of "b",
+// i.e. a union of field path for body and field paths for path parameters.
+func (b *Binding) ExplicitParams() []string {
+	var result []string
+	if b.Body != nil {
+		result = append(result, b.Body.FieldPath.String())
+	}
+	for _, p := range b.PathParams {
+		result = append(result, p.FieldPath.String())
+	}
+	return result
+}
+
+// Field wraps descriptor.FieldDescriptorProto for richer features.
+type Field struct {
+	// Message is the message type which this field belongs to.
+	Message *Message
+	// FieldMessage is the message type of the field.
+	FieldMessage *Message
+	*descriptor.FieldDescriptorProto
+}
+
+// Parameter is a parameter provided in http requests
+type Parameter struct {
+	// FieldPath is a path to a proto field which this parameter is mapped to.
+	FieldPath
+	// Target is the proto field which this parameter is mapped to.
+	Target *Field
+	// Method is the method which this parameter is used for.
+	Method *Method
+}
+
+// ConvertFuncExpr returns a go expression of a converter function.
+// The converter function converts a string into a value for the parameter.
+func (p Parameter) ConvertFuncExpr() (string, error) {
+	tbl := proto3ConvertFuncs
+	if !p.IsProto2() && p.IsRepeated() {
+		tbl = proto3RepeatedConvertFuncs
+	} else if p.IsProto2() && !p.IsRepeated() {
+		tbl = proto2ConvertFuncs
+	} else if p.IsProto2() && p.IsRepeated() {
+		tbl = proto2RepeatedConvertFuncs
+	}
+	typ := p.Target.GetType()
+	conv, ok := tbl[typ]
+	if !ok {
+		conv, ok = wellKnownTypeConv[p.Target.GetTypeName()]
+	}
+	if !ok {
+		return "", fmt.Errorf("unsupported field type %s of parameter %s in %s.%s", typ, p.FieldPath, p.Method.Service.GetName(), p.Method.GetName())
+	}
+	return conv, nil
+}
+
+// IsEnum returns true if the field is an enum type, otherwise false is returned.
+func (p Parameter) IsEnum() bool {
+	return p.Target.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM
+}
+
+// IsRepeated returns true if the field is repeated, otherwise false is returned.
+func (p Parameter) IsRepeated() bool {
+	return p.Target.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// IsProto2 returns true if the field is proto2, otherwise false is returned.
+func (p Parameter) IsProto2() bool {
+	return p.Target.Message.File.proto2()
+}
+
+// Body describes a http (request|response) body to be sent to the (method|client).
+// This is used in body and response_body options in google.api.HttpRule
+type Body struct {
+	// FieldPath is a path to a proto field which the (request|response) body is mapped to.
+	// The (request|response) body is mapped to the (request|response) type itself if FieldPath is empty.
+	FieldPath FieldPath
+}
+
+// AssignableExpr returns an assignable expression in Go to be used to initialize method request object.
+// It starts with "msgExpr", which is the go expression of the method request object.
+func (b Body) AssignableExpr(msgExpr string) string {
+	return b.FieldPath.AssignableExpr(msgExpr)
+}
+
+// FieldPath is a path to a field from a request message.
+type FieldPath []FieldPathComponent
+
+// String returns a string representation of the field path.
+func (p FieldPath) String() string {
+	var components []string
+	for _, c := range p {
+		components = append(components, c.Name)
+	}
+	return strings.Join(components, ".")
+}
+
+// IsNestedProto3 indicates whether the FieldPath is a nested Proto3 path.
+func (p FieldPath) IsNestedProto3() bool {
+	if len(p) > 1 && !p[0].Target.Message.File.proto2() {
+		return true
+	}
+	return false
+}
+
+// AssignableExpr is an assignable expression in Go to be used to assign a value to the target field.
+// It starts with "msgExpr", which is the go expression of the method request object.
+func (p FieldPath) AssignableExpr(msgExpr string) string {
+	l := len(p)
+	if l == 0 {
+		return msgExpr
+	}
+
+	var preparations []string
+	components := msgExpr
+	for i, c := range p {
+		// Check if it is a oneOf field.
+		if c.Target.OneofIndex != nil {
+			index := c.Target.OneofIndex
+			msg := c.Target.Message
+			oneOfName := gogen.CamelCase(msg.GetOneofDecl()[*index].GetName())
+			oneofFieldName := msg.GetName() + "_" + c.AssignableExpr()
+
+			components = components + "." + oneOfName
+			s := `if %s == nil {
+				%s =&%s{}
+			} else if _, ok := %s.(*%s); !ok {
+				return nil, metadata, grpc.Errorf(codes.InvalidArgument, "expect type: *%s, but: %%t\n",%s)
+			}`
+
+			preparations = append(preparations, fmt.Sprintf(s, components, components, oneofFieldName, components, oneofFieldName, oneofFieldName, components))
+			components = components + ".(*" + oneofFieldName + ")"
+		}
+
+		if i == l-1 {
+			components = components + "." + c.AssignableExpr()
+			continue
+		}
+		components = components + "." + c.ValueExpr()
+	}
+
+	preparations = append(preparations, components)
+	return strings.Join(preparations, "\n")
+}
+
+// FieldPathComponent is a path component in FieldPath
+type FieldPathComponent struct {
+	// Name is a name of the proto field which this component corresponds to.
+	// TODO(yugui) is this necessary?
+	Name string
+	// Target is the proto field which this component corresponds to.
+	Target *Field
+}
+
+// AssignableExpr returns an assignable expression in go for this field.
+func (c FieldPathComponent) AssignableExpr() string {
+	return gogen.CamelCase(c.Name)
+}
+
+// ValueExpr returns an expression in go for this field.
+func (c FieldPathComponent) ValueExpr() string {
+	if c.Target.Message.File.proto2() {
+		return fmt.Sprintf("Get%s()", gogen.CamelCase(c.Name))
+	}
+	return gogen.CamelCase(c.Name)
+}
+
+var (
+	proto3ConvertFuncs = map[descriptor.FieldDescriptorProto_Type]string{
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:  "runtime.Float64",
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:   "runtime.Float32",
+		descriptor.FieldDescriptorProto_TYPE_INT64:   "runtime.Int64",
+		descriptor.FieldDescriptorProto_TYPE_UINT64:  "runtime.Uint64",
+		descriptor.FieldDescriptorProto_TYPE_INT32:   "runtime.Int32",
+		descriptor.FieldDescriptorProto_TYPE_FIXED64: "runtime.Uint64",
+		descriptor.FieldDescriptorProto_TYPE_FIXED32: "runtime.Uint32",
+		descriptor.FieldDescriptorProto_TYPE_BOOL:    "runtime.Bool",
+		descriptor.FieldDescriptorProto_TYPE_STRING:  "runtime.String",
+		// FieldDescriptorProto_TYPE_GROUP
+		// FieldDescriptorProto_TYPE_MESSAGE
+		descriptor.FieldDescriptorProto_TYPE_BYTES:    "runtime.Bytes",
+		descriptor.FieldDescriptorProto_TYPE_UINT32:   "runtime.Uint32",
+		descriptor.FieldDescriptorProto_TYPE_ENUM:     "runtime.Enum",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32: "runtime.Int32",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64: "runtime.Int64",
+		descriptor.FieldDescriptorProto_TYPE_SINT32:   "runtime.Int32",
+		descriptor.FieldDescriptorProto_TYPE_SINT64:   "runtime.Int64",
+	}
+
+	proto3RepeatedConvertFuncs = map[descriptor.FieldDescriptorProto_Type]string{
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:  "runtime.Float64Slice",
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:   "runtime.Float32Slice",
+		descriptor.FieldDescriptorProto_TYPE_INT64:   "runtime.Int64Slice",
+		descriptor.FieldDescriptorProto_TYPE_UINT64:  "runtime.Uint64Slice",
+		descriptor.FieldDescriptorProto_TYPE_INT32:   "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_FIXED64: "runtime.Uint64Slice",
+		descriptor.FieldDescriptorProto_TYPE_FIXED32: "runtime.Uint32Slice",
+		descriptor.FieldDescriptorProto_TYPE_BOOL:    "runtime.BoolSlice",
+		descriptor.FieldDescriptorProto_TYPE_STRING:  "runtime.StringSlice",
+		// FieldDescriptorProto_TYPE_GROUP
+		// FieldDescriptorProto_TYPE_MESSAGE
+		descriptor.FieldDescriptorProto_TYPE_BYTES:    "runtime.BytesSlice",
+		descriptor.FieldDescriptorProto_TYPE_UINT32:   "runtime.Uint32Slice",
+		descriptor.FieldDescriptorProto_TYPE_ENUM:     "runtime.EnumSlice",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32: "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64: "runtime.Int64Slice",
+		descriptor.FieldDescriptorProto_TYPE_SINT32:   "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_SINT64:   "runtime.Int64Slice",
+	}
+
+	proto2ConvertFuncs = map[descriptor.FieldDescriptorProto_Type]string{
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:  "runtime.Float64P",
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:   "runtime.Float32P",
+		descriptor.FieldDescriptorProto_TYPE_INT64:   "runtime.Int64P",
+		descriptor.FieldDescriptorProto_TYPE_UINT64:  "runtime.Uint64P",
+		descriptor.FieldDescriptorProto_TYPE_INT32:   "runtime.Int32P",
+		descriptor.FieldDescriptorProto_TYPE_FIXED64: "runtime.Uint64P",
+		descriptor.FieldDescriptorProto_TYPE_FIXED32: "runtime.Uint32P",
+		descriptor.FieldDescriptorProto_TYPE_BOOL:    "runtime.BoolP",
+		descriptor.FieldDescriptorProto_TYPE_STRING:  "runtime.StringP",
+		// FieldDescriptorProto_TYPE_GROUP
+		// FieldDescriptorProto_TYPE_MESSAGE
+		// FieldDescriptorProto_TYPE_BYTES
+		// TODO(yugui) Handle bytes
+		descriptor.FieldDescriptorProto_TYPE_UINT32:   "runtime.Uint32P",
+		descriptor.FieldDescriptorProto_TYPE_ENUM:     "runtime.EnumP",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32: "runtime.Int32P",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64: "runtime.Int64P",
+		descriptor.FieldDescriptorProto_TYPE_SINT32:   "runtime.Int32P",
+		descriptor.FieldDescriptorProto_TYPE_SINT64:   "runtime.Int64P",
+	}
+
+	proto2RepeatedConvertFuncs = map[descriptor.FieldDescriptorProto_Type]string{
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:  "runtime.Float64Slice",
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:   "runtime.Float32Slice",
+		descriptor.FieldDescriptorProto_TYPE_INT64:   "runtime.Int64Slice",
+		descriptor.FieldDescriptorProto_TYPE_UINT64:  "runtime.Uint64Slice",
+		descriptor.FieldDescriptorProto_TYPE_INT32:   "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_FIXED64: "runtime.Uint64Slice",
+		descriptor.FieldDescriptorProto_TYPE_FIXED32: "runtime.Uint32Slice",
+		descriptor.FieldDescriptorProto_TYPE_BOOL:    "runtime.BoolSlice",
+		descriptor.FieldDescriptorProto_TYPE_STRING:  "runtime.StringSlice",
+		// FieldDescriptorProto_TYPE_GROUP
+		// FieldDescriptorProto_TYPE_MESSAGE
+		// FieldDescriptorProto_TYPE_BYTES
+		// TODO(maros7) Handle bytes
+		descriptor.FieldDescriptorProto_TYPE_UINT32:   "runtime.Uint32Slice",
+		descriptor.FieldDescriptorProto_TYPE_ENUM:     "runtime.EnumSlice",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32: "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64: "runtime.Int64Slice",
+		descriptor.FieldDescriptorProto_TYPE_SINT32:   "runtime.Int32Slice",
+		descriptor.FieldDescriptorProto_TYPE_SINT64:   "runtime.Int64Slice",
+	}
+
+	wellKnownTypeConv = map[string]string{
+		".google.protobuf.Timestamp":   "runtime.Timestamp",
+		".google.protobuf.Duration":    "runtime.Duration",
+		".google.protobuf.StringValue": "runtime.StringValue",
+		".google.protobuf.FloatValue":  "runtime.FloatValue",
+		".google.protobuf.DoubleValue": "runtime.DoubleValue",
+		".google.protobuf.BoolValue":   "runtime.BoolValue",
+		".google.protobuf.BytesValue":  "runtime.BytesValue",
+		".google.protobuf.Int32Value":  "runtime.Int32Value",
+		".google.protobuf.UInt32Value": "runtime.UInt32Value",
+		".google.protobuf.Int64Value":  "runtime.Int64Value",
+		".google.protobuf.UInt64Value": "runtime.UInt64Value",
+	}
+)
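
These maps tell the generator which runtime converter to emit when a path or query parameter must be decoded from its string form: scalar proto3 fields get the value converters, proto2 optional fields get the pointer-returning `*P` variants, and repeated fields get the `*Slice` variants. Below is a minimal sketch of the calls the generated code ends up making, assuming the v1 `runtime` converter signatures; the literal values are only examples.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// A non-repeated proto3 int64 parameter is converted with runtime.Int64.
	id, err := runtime.Int64("42")
	fmt.Println(id, err) // 42 <nil>

	// A proto2 optional int32 field uses the pointer-returning variant runtime.Int32P.
	page, err := runtime.Int32P("7")
	fmt.Println(*page, err) // 7 <nil>

	// A repeated string field is split on the configured separator (csv by default).
	tags, err := runtime.StringSlice("a,b,c", ",")
	fmt.Println(tags, err) // [a b c] <nil>
}
```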
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/BUILD.bazel
new file mode 100644
index 0000000..6cb2162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//:generators"])
+
+go_library(
+    name = "go_default_library",
+    srcs = ["generator.go"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator",
+    deps = [
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/generator.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/generator.go
new file mode 100644
index 0000000..df55da4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator/generator.go
@@ -0,0 +1,13 @@
+// Package generator provides an abstract interface to code generators.
+package generator
+
+import (
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+)
+
+// Generator is an abstraction of code generators.
+type Generator interface {
+	// Generate generates output files from input .proto files.
+	Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error)
+}
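
This interface is the entire contract between the plugin front end and a code-generating back end: it receives the resolved targets and returns plain CodeGeneratorResponse_File entries. A minimal sketch of a custom implementation follows, assuming only what the interface declares; the stubGenerator type and the emitted file name are invented for illustration.

```go
package stubgen

import (
	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
	gen "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator"
)

// stubGenerator emits one fixed file per input .proto, just to show the shape
// of the contract; a real generator would render templates from the targets.
type stubGenerator struct{}

func (stubGenerator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) {
	var out []*plugin.CodeGeneratorResponse_File
	for _, f := range targets {
		out = append(out, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(f.GetName() + ".stub.txt"),
			Content: proto.String("// generated from " + f.GetName() + "\n"),
		})
	}
	return out, nil
}

// Compile-time check that stubGenerator satisfies the Generator interface.
var _ gen.Generator = stubGenerator{}
```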
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/BUILD.bazel
new file mode 100644
index 0000000..316010f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/BUILD.bazel
@@ -0,0 +1,38 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//protoc-gen-grpc-gateway:__subpackages__"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "generator.go",
+        "template.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway",
+    deps = [
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-grpc-gateway/generator:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "generator_test.go",
+        "template_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-grpc-gateway/httprule:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/doc.go
new file mode 100644
index 0000000..223d810
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/doc.go
@@ -0,0 +1,2 @@
+// Package gengateway provides a code generator for grpc gateway files.
+package gengateway
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go
new file mode 100644
index 0000000..0b6bfbd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go
@@ -0,0 +1,171 @@
+package gengateway
+
+import (
+	"errors"
+	"fmt"
+	"go/format"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/golang/protobuf/proto"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	gen "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator"
+)
+
+var (
+	errNoTargetService = errors.New("no target service defined in the file")
+)
+
+type pathType int
+
+const (
+	pathTypeImport pathType = iota
+	pathTypeSourceRelative
+)
+
+type generator struct {
+	reg                *descriptor.Registry
+	baseImports        []descriptor.GoPackage
+	useRequestContext  bool
+	registerFuncSuffix string
+	pathType           pathType
+	allowPatchFeature  bool
+}
+
+// New returns a new generator which generates grpc gateway files.
+func New(reg *descriptor.Registry, useRequestContext bool, registerFuncSuffix, pathTypeString string, allowPatchFeature bool) gen.Generator {
+	var imports []descriptor.GoPackage
+	for _, pkgpath := range []string{
+		"context",
+		"io",
+		"net/http",
+		"github.com/grpc-ecosystem/grpc-gateway/runtime",
+		"github.com/grpc-ecosystem/grpc-gateway/utilities",
+		"github.com/golang/protobuf/descriptor",
+		"github.com/golang/protobuf/proto",
+		"google.golang.org/grpc",
+		"google.golang.org/grpc/codes",
+		"google.golang.org/grpc/grpclog",
+		"google.golang.org/grpc/status",
+	} {
+		pkg := descriptor.GoPackage{
+			Path: pkgpath,
+			Name: path.Base(pkgpath),
+		}
+		if err := reg.ReserveGoPackageAlias(pkg.Name, pkg.Path); err != nil {
+			for i := 0; ; i++ {
+				alias := fmt.Sprintf("%s_%d", pkg.Name, i)
+				if err := reg.ReserveGoPackageAlias(alias, pkg.Path); err != nil {
+					continue
+				}
+				pkg.Alias = alias
+				break
+			}
+		}
+		imports = append(imports, pkg)
+	}
+
+	var pathType pathType
+	switch pathTypeString {
+	case "", "import":
+		// paths=import is default
+	case "source_relative":
+		pathType = pathTypeSourceRelative
+	default:
+		glog.Fatalf(`Unknown path type %q: want "import" or "source_relative".`, pathTypeString)
+	}
+
+	return &generator{
+		reg:                reg,
+		baseImports:        imports,
+		useRequestContext:  useRequestContext,
+		registerFuncSuffix: registerFuncSuffix,
+		pathType:           pathType,
+		allowPatchFeature:  allowPatchFeature,
+	}
+}
+
+func (g *generator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) {
+	var files []*plugin.CodeGeneratorResponse_File
+	for _, file := range targets {
+		glog.V(1).Infof("Processing %s", file.GetName())
+		code, err := g.generate(file)
+		if err == errNoTargetService {
+			glog.V(1).Infof("%s: %v", file.GetName(), err)
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		formatted, err := format.Source([]byte(code))
+		if err != nil {
+			glog.Errorf("%v: %s", err, code)
+			return nil, err
+		}
+		name := file.GetName()
+		if g.pathType == pathTypeImport && file.GoPkg.Path != "" {
+			name = fmt.Sprintf("%s/%s", file.GoPkg.Path, filepath.Base(name))
+		}
+		ext := filepath.Ext(name)
+		base := strings.TrimSuffix(name, ext)
+		output := fmt.Sprintf("%s.pb.gw.go", base)
+		files = append(files, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(output),
+			Content: proto.String(string(formatted)),
+		})
+		glog.V(1).Infof("Will emit %s", output)
+	}
+	return files, nil
+}
+
+func (g *generator) generate(file *descriptor.File) (string, error) {
+	pkgSeen := make(map[string]bool)
+	var imports []descriptor.GoPackage
+	for _, pkg := range g.baseImports {
+		pkgSeen[pkg.Path] = true
+		imports = append(imports, pkg)
+	}
+	for _, svc := range file.Services {
+		for _, m := range svc.Methods {
+			imports = append(imports, g.addEnumPathParamImports(file, m, pkgSeen)...)
+			pkg := m.RequestType.File.GoPkg
+			if len(m.Bindings) == 0 ||
+				pkg == file.GoPkg || pkgSeen[pkg.Path] {
+				continue
+			}
+			pkgSeen[pkg.Path] = true
+			imports = append(imports, pkg)
+		}
+	}
+	params := param{
+		File:               file,
+		Imports:            imports,
+		UseRequestContext:  g.useRequestContext,
+		RegisterFuncSuffix: g.registerFuncSuffix,
+		AllowPatchFeature:  g.allowPatchFeature,
+	}
+	return applyTemplate(params, g.reg)
+}
+
+// addEnumPathParamImports adds imports for the Go packages of enum types referenced by path parameters.
+func (g *generator) addEnumPathParamImports(file *descriptor.File, m *descriptor.Method, pkgSeen map[string]bool) []descriptor.GoPackage {
+	var imports []descriptor.GoPackage
+	for _, b := range m.Bindings {
+		for _, p := range b.PathParams {
+			e, err := g.reg.LookupEnum("", p.Target.GetTypeName())
+			if err != nil {
+				continue
+			}
+			pkg := e.File.GoPkg
+			if pkg == file.GoPkg || pkgSeen[pkg.Path] {
+				continue
+			}
+			pkgSeen[pkg.Path] = true
+			imports = append(imports, pkg)
+		}
+	}
+	return imports
+}
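
The only subtle part of Generate above is the output file naming: under the default paths=import mode the .pb.gw.go file is rebased onto the Go import path of the target package, while paths=source_relative keeps it next to the source .proto. The sketch below mirrors that rule; the file and package names are hypothetical.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// outputName mirrors the naming logic in (*generator).Generate: strip the
// .proto extension, append .pb.gw.go, and optionally rebase the file onto the
// Go import path of its package.
func outputName(protoName, goPkgPath string, sourceRelative bool) string {
	name := protoName
	if !sourceRelative && goPkgPath != "" {
		name = fmt.Sprintf("%s/%s", goPkgPath, filepath.Base(name))
	}
	base := strings.TrimSuffix(name, filepath.Ext(name))
	return base + ".pb.gw.go"
}

func main() {
	fmt.Println(outputName("api/v1/echo.proto", "example.com/gen/v1", false)) // example.com/gen/v1/echo.pb.gw.go
	fmt.Println(outputName("api/v1/echo.proto", "example.com/gen/v1", true))  // api/v1/echo.pb.gw.go
}
```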
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go
new file mode 100644
index 0000000..1d3d3ca
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go
@@ -0,0 +1,715 @@
+package gengateway
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"text/template"
+
+	"github.com/golang/glog"
+	generator2 "github.com/golang/protobuf/protoc-gen-go/generator"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+)
+
+type param struct {
+	*descriptor.File
+	Imports            []descriptor.GoPackage
+	UseRequestContext  bool
+	RegisterFuncSuffix string
+	AllowPatchFeature  bool
+}
+
+type binding struct {
+	*descriptor.Binding
+	Registry          *descriptor.Registry
+	AllowPatchFeature bool
+}
+
+// GetBodyFieldPath returns the binding body's fieldpath.
+func (b binding) GetBodyFieldPath() string {
+	if b.Body != nil && len(b.Body.FieldPath) != 0 {
+		return b.Body.FieldPath.String()
+	}
+	return "*"
+}
+
+// GetBodyFieldStructName returns the binding body's struct field name.
+func (b binding) GetBodyFieldStructName() (string, error) {
+	if b.Body != nil && len(b.Body.FieldPath) != 0 {
+		return generator2.CamelCase(b.Body.FieldPath.String()), nil
+	}
+	return "", errors.New("No body field found")
+}
+
+// HasQueryParam determines if the binding needs parameters in query string.
+//
+// It may return true even when the binding does not actually need query parameters.
+// That is harmless: it only results in a small amount of extra generated code.
+func (b binding) HasQueryParam() bool {
+	if b.Body != nil && len(b.Body.FieldPath) == 0 {
+		return false
+	}
+	fields := make(map[string]bool)
+	for _, f := range b.Method.RequestType.Fields {
+		fields[f.GetName()] = true
+	}
+	if b.Body != nil {
+		delete(fields, b.Body.FieldPath.String())
+	}
+	for _, p := range b.PathParams {
+		delete(fields, p.FieldPath.String())
+	}
+	return len(fields) > 0
+}
+
+func (b binding) QueryParamFilter() queryParamFilter {
+	var seqs [][]string
+	if b.Body != nil {
+		seqs = append(seqs, strings.Split(b.Body.FieldPath.String(), "."))
+	}
+	for _, p := range b.PathParams {
+		seqs = append(seqs, strings.Split(p.FieldPath.String(), "."))
+	}
+	return queryParamFilter{utilities.NewDoubleArray(seqs)}
+}
+
+// HasEnumPathParam returns true if the path parameter slice contains a parameter
+// that maps to a non-repeated enum proto field; otherwise it returns false.
+func (b binding) HasEnumPathParam() bool {
+	return b.hasEnumPathParam(false)
+}
+
+// HasRepeatedEnumPathParam returns true if the path parameter slice contains a parameter
+// that maps to a repeated enum proto field; otherwise it returns false.
+func (b binding) HasRepeatedEnumPathParam() bool {
+	return b.hasEnumPathParam(true)
+}
+
+// hasEnumPathParam returns true if the path parameter slice contains a parameter
+// that maps to an enum proto field whose repeatedness matches the provided
+// 'repeated' argument.
+func (b binding) hasEnumPathParam(repeated bool) bool {
+	for _, p := range b.PathParams {
+		if p.IsEnum() && p.IsRepeated() == repeated {
+			return true
+		}
+	}
+	return false
+}
+
+// LookupEnum looks up an enum type by path parameter.
+func (b binding) LookupEnum(p descriptor.Parameter) *descriptor.Enum {
+	e, err := b.Registry.LookupEnum("", p.Target.GetTypeName())
+	if err != nil {
+		return nil
+	}
+	return e
+}
+
+// FieldMaskField returns the golang-style name of the variable for a FieldMask, if there is exactly one of that type in
+// the message. Otherwise, it returns an empty string.
+func (b binding) FieldMaskField() string {
+	var fieldMaskField *descriptor.Field
+	for _, f := range b.Method.RequestType.Fields {
+		if f.GetTypeName() == ".google.protobuf.FieldMask" {
+			// if there is more than 1 FieldMask for this request, then return none
+			if fieldMaskField != nil {
+				return ""
+			}
+			fieldMaskField = f
+		}
+	}
+	if fieldMaskField != nil {
+		return generator2.CamelCase(fieldMaskField.GetName())
+	}
+	return ""
+}
+
+// queryParamFilter is a wrapper of utilities.DoubleArray which provides String() to output DoubleArray.Encoding in a stable and predictable format.
+type queryParamFilter struct {
+	*utilities.DoubleArray
+}
+
+func (f queryParamFilter) String() string {
+	encodings := make([]string, len(f.Encoding))
+	for str, enc := range f.Encoding {
+		encodings[enc] = fmt.Sprintf("%q: %d", str, enc)
+	}
+	e := strings.Join(encodings, ", ")
+	return fmt.Sprintf("&utilities.DoubleArray{Encoding: map[string]int{%s}, Base: %#v, Check: %#v}", e, f.Base, f.Check)
+}
+
+type trailerParams struct {
+	Services           []*descriptor.Service
+	UseRequestContext  bool
+	RegisterFuncSuffix string
+	AssumeColonVerb    bool
+}
+
+func applyTemplate(p param, reg *descriptor.Registry) (string, error) {
+	w := bytes.NewBuffer(nil)
+	if err := headerTemplate.Execute(w, p); err != nil {
+		return "", err
+	}
+	var targetServices []*descriptor.Service
+
+	for _, msg := range p.Messages {
+		msgName := generator2.CamelCase(*msg.Name)
+		msg.Name = &msgName
+	}
+	for _, svc := range p.Services {
+		var methodWithBindingsSeen bool
+		svcName := generator2.CamelCase(*svc.Name)
+		svc.Name = &svcName
+		for _, meth := range svc.Methods {
+			glog.V(2).Infof("Processing %s.%s", svc.GetName(), meth.GetName())
+			methName := generator2.CamelCase(*meth.Name)
+			meth.Name = &methName
+			for _, b := range meth.Bindings {
+				methodWithBindingsSeen = true
+				if err := handlerTemplate.Execute(w, binding{
+					Binding:           b,
+					Registry:          reg,
+					AllowPatchFeature: p.AllowPatchFeature,
+				}); err != nil {
+					return "", err
+				}
+
+				// Local
+				if err := localHandlerTemplate.Execute(w, binding{
+					Binding:           b,
+					Registry:          reg,
+					AllowPatchFeature: p.AllowPatchFeature,
+				}); err != nil {
+					return "", err
+				}
+			}
+		}
+		if methodWithBindingsSeen {
+			targetServices = append(targetServices, svc)
+		}
+	}
+	if len(targetServices) == 0 {
+		return "", errNoTargetService
+	}
+
+	assumeColonVerb := true
+	if reg != nil {
+		assumeColonVerb = !reg.GetAllowColonFinalSegments()
+	}
+	tp := trailerParams{
+		Services:           targetServices,
+		UseRequestContext:  p.UseRequestContext,
+		RegisterFuncSuffix: p.RegisterFuncSuffix,
+		AssumeColonVerb:    assumeColonVerb,
+	}
+	// Local
+	if err := localTrailerTemplate.Execute(w, tp); err != nil {
+		return "", err
+	}
+
+	if err := trailerTemplate.Execute(w, tp); err != nil {
+		return "", err
+	}
+	return w.String(), nil
+}
+
+var (
+	headerTemplate = template.Must(template.New("header").Parse(`
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: {{.GetName}}
+
+/*
+Package {{.GoPkg.Name}} is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package {{.GoPkg.Name}}
+import (
+	{{range $i := .Imports}}{{if $i.Standard}}{{$i | printf "%s\n"}}{{end}}{{end}}
+
+	{{range $i := .Imports}}{{if not $i.Standard}}{{$i | printf "%s\n"}}{{end}}{{end}}
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+`))
+
+	handlerTemplate = template.Must(template.New("handler").Parse(`
+{{if and .Method.GetClientStreaming .Method.GetServerStreaming}}
+{{template "bidi-streaming-request-func" .}}
+{{else if .Method.GetClientStreaming}}
+{{template "client-streaming-request-func" .}}
+{{else}}
+{{template "client-rpc-request-func" .}}
+{{end}}
+`))
+
+	_ = template.Must(handlerTemplate.New("request-func-signature").Parse(strings.Replace(`
+{{if .Method.GetServerStreaming}}
+func request_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}(ctx context.Context, marshaler runtime.Marshaler, client {{.Method.Service.GetName}}Client, req *http.Request, pathParams map[string]string) ({{.Method.Service.GetName}}_{{.Method.GetName}}Client, runtime.ServerMetadata, error)
+{{else}}
+func request_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}(ctx context.Context, marshaler runtime.Marshaler, client {{.Method.Service.GetName}}Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error)
+{{end}}`, "\n", "", -1)))
+
+	_ = template.Must(handlerTemplate.New("client-streaming-request-func").Parse(`
+{{template "request-func-signature" .}} {
+	var metadata runtime.ServerMetadata
+	stream, err := client.{{.Method.GetName}}(ctx)
+	if err != nil {
+		grpclog.Infof("Failed to start streaming: %v", err)
+		return nil, metadata, err
+	}
+	dec := marshaler.NewDecoder(req.Body)
+	for {
+		var protoReq {{.Method.RequestType.GoType .Method.Service.File.GoPkg.Path}}
+		err = dec.Decode(&protoReq)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			grpclog.Infof("Failed to decode request: %v", err)
+			return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+		}
+		if err = stream.Send(&protoReq); err != nil {
+			if err == io.EOF {
+				break
+			}
+			grpclog.Infof("Failed to send request: %v", err)
+			return nil, metadata, err
+		}
+	}
+
+	if err := stream.CloseSend(); err != nil {
+		grpclog.Infof("Failed to terminate client stream: %v", err)
+		return nil, metadata, err
+	}
+	header, err := stream.Header()
+	if err != nil {
+		grpclog.Infof("Failed to get header from client: %v", err)
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+{{if .Method.GetServerStreaming}}
+	return stream, metadata, nil
+{{else}}
+	msg, err := stream.CloseAndRecv()
+	metadata.TrailerMD = stream.Trailer()
+	return msg, metadata, err
+{{end}}
+}
+`))
+
+	_ = template.Must(handlerTemplate.New("client-rpc-request-func").Parse(`
+{{$AllowPatchFeature := .AllowPatchFeature}}
+{{if .HasQueryParam}}
+var (
+	filter_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}} = {{.QueryParamFilter}}
+)
+{{end}}
+{{template "request-func-signature" .}} {
+	var protoReq {{.Method.RequestType.GoType .Method.Service.File.GoPkg.Path}}
+	var metadata runtime.ServerMetadata
+{{if .Body}}
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&{{.Body.AssignableExpr "protoReq"}}); err != nil && err != io.EOF  {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+	{{- if and $AllowPatchFeature (eq (.HTTPMethod) "PATCH") (.FieldMaskField) (not (eq "*" .GetBodyFieldPath)) }}
+	if protoReq.{{.FieldMaskField}} == nil || len(protoReq.{{.FieldMaskField}}.GetPaths()) == 0 {
+			_, md := descriptor.ForMessage(protoReq.{{.GetBodyFieldStructName}})
+			if fieldMask, err := runtime.FieldMaskFromRequestBody(newReader(), md); err != nil {
+				return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+			} else {
+				protoReq.{{.FieldMaskField}} = fieldMask
+			}
+	}
+	{{end}}
+{{end}}
+{{if .PathParams}}
+	var (
+		val string
+{{- if .HasEnumPathParam}}
+		e int32
+{{- end}}
+{{- if .HasRepeatedEnumPathParam}}
+		es []int32
+{{- end}}
+		ok bool
+		err error
+		_ = err
+	)
+	{{$binding := .}}
+	{{range $param := .PathParams}}
+	{{$enum := $binding.LookupEnum $param}}
+	val, ok = pathParams[{{$param | printf "%q"}}]
+	if !ok {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", {{$param | printf "%q"}})
+	}
+{{if $param.IsNestedProto3}}
+	err = runtime.PopulateFieldFromPath(&protoReq, {{$param | printf "%q"}}, val)
+	{{if $enum}}
+		e{{if $param.IsRepeated}}s{{end}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}}, {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}_value)
+	{{end}}
+{{else if $enum}}
+	e{{if $param.IsRepeated}}s{{end}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}}, {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}_value)
+{{else}}
+	{{$param.AssignableExpr "protoReq"}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}})
+{{end}}
+	if err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", {{$param | printf "%q"}}, err)
+	}
+{{if and $enum $param.IsRepeated}}
+	s := make([]{{$enum.GoType $param.Target.Message.File.GoPkg.Path}}, len(es))
+	for i, v := range es {
+		s[i] = {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}(v)
+	}
+	{{$param.AssignableExpr "protoReq"}} = s
+{{else if $enum}}
+	{{$param.AssignableExpr "protoReq"}} = {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}(e)
+{{end}}
+	{{end}}
+{{end}}
+{{if .HasQueryParam}}
+	if err := req.ParseForm(); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+{{end}}
+{{if .Method.GetServerStreaming}}
+	stream, err := client.{{.Method.GetName}}(ctx, &protoReq)
+	if err != nil {
+		return nil, metadata, err
+	}
+	header, err := stream.Header()
+	if err != nil {
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+{{else}}
+	msg, err := client.{{.Method.GetName}}(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+{{end}}
+}`))
+
+	_ = template.Must(handlerTemplate.New("bidi-streaming-request-func").Parse(`
+{{template "request-func-signature" .}} {
+	var metadata runtime.ServerMetadata
+	stream, err := client.{{.Method.GetName}}(ctx)
+	if err != nil {
+		grpclog.Infof("Failed to start streaming: %v", err)
+		return nil, metadata, err
+	}
+	dec := marshaler.NewDecoder(req.Body)
+	handleSend := func() error {
+		var protoReq {{.Method.RequestType.GoType .Method.Service.File.GoPkg.Path}}
+		err := dec.Decode(&protoReq)
+		if err == io.EOF {
+			return err
+		}
+		if err != nil {
+			grpclog.Infof("Failed to decode request: %v", err)
+			return err
+		}
+		if err := stream.Send(&protoReq); err != nil {
+			grpclog.Infof("Failed to send request: %v", err)
+			return err
+		}
+		return nil
+	}
+	if err := handleSend(); err != nil {
+		if cerr := stream.CloseSend(); cerr != nil {
+			grpclog.Infof("Failed to terminate client stream: %v", cerr)
+		}
+		if err == io.EOF {
+			return stream, metadata, nil
+		}
+		return nil, metadata, err
+	}
+	go func() {
+		for {
+			if err := handleSend(); err != nil {
+				break
+			}
+		}
+		if err := stream.CloseSend(); err != nil {
+			grpclog.Infof("Failed to terminate client stream: %v", err)
+		}
+	}()
+	header, err := stream.Header()
+	if err != nil {
+		grpclog.Infof("Failed to get header from client: %v", err)
+		return nil, metadata, err
+	}
+	metadata.HeaderMD = header
+	return stream, metadata, nil
+}
+`))
+
+	localHandlerTemplate = template.Must(template.New("local-handler").Parse(`
+{{if and .Method.GetClientStreaming .Method.GetServerStreaming}}
+{{else if .Method.GetClientStreaming}}
+{{else if .Method.GetServerStreaming}}
+{{else}}
+{{template "local-client-rpc-request-func" .}}
+{{end}}
+`))
+
+	_ = template.Must(localHandlerTemplate.New("local-request-func-signature").Parse(strings.Replace(`
+{{if .Method.GetServerStreaming}}
+{{else}}
+func local_request_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}(ctx context.Context, marshaler runtime.Marshaler, server {{.Method.Service.GetName}}Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error)
+{{end}}`, "\n", "", -1)))
+
+	_ = template.Must(localHandlerTemplate.New("local-client-rpc-request-func").Parse(`
+{{$AllowPatchFeature := .AllowPatchFeature}}
+{{template "local-request-func-signature" .}} {
+	var protoReq {{.Method.RequestType.GoType .Method.Service.File.GoPkg.Path}}
+	var metadata runtime.ServerMetadata
+{{if .Body}}
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&{{.Body.AssignableExpr "protoReq"}}); err != nil && err != io.EOF  {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+	{{- if and $AllowPatchFeature (eq (.HTTPMethod) "PATCH") (.FieldMaskField) (not (eq "*" .GetBodyFieldPath)) }}
+	if protoReq.{{.FieldMaskField}} == nil || len(protoReq.{{.FieldMaskField}}.GetPaths()) == 0 {
+			_, md := descriptor.ForMessage(protoReq.{{.GetBodyFieldStructName}})
+			if fieldMask, err := runtime.FieldMaskFromRequestBody(newReader(), md); err != nil {
+				return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+			} else {
+				protoReq.{{.FieldMaskField}} = fieldMask
+			}
+	}
+	{{end}}
+{{end}}
+{{if .PathParams}}
+	var (
+		val string
+{{- if .HasEnumPathParam}}
+		e int32
+{{- end}}
+{{- if .HasRepeatedEnumPathParam}}
+		es []int32
+{{- end}}
+		ok bool
+		err error
+		_ = err
+	)
+	{{$binding := .}}
+	{{range $param := .PathParams}}
+	{{$enum := $binding.LookupEnum $param}}
+	val, ok = pathParams[{{$param | printf "%q"}}]
+	if !ok {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", {{$param | printf "%q"}})
+	}
+{{if $param.IsNestedProto3}}
+	err = runtime.PopulateFieldFromPath(&protoReq, {{$param | printf "%q"}}, val)
+	{{if $enum}}
+		e{{if $param.IsRepeated}}s{{end}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}}, {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}_value)
+	{{end}}
+{{else if $enum}}
+	e{{if $param.IsRepeated}}s{{end}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}}, {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}_value)
+{{else}}
+	{{$param.AssignableExpr "protoReq"}}, err = {{$param.ConvertFuncExpr}}(val{{if $param.IsRepeated}}, {{$binding.Registry.GetRepeatedPathParamSeparator | printf "%c" | printf "%q"}}{{end}})
+{{end}}
+	if err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", {{$param | printf "%q"}}, err)
+	}
+{{if and $enum $param.IsRepeated}}
+	s := make([]{{$enum.GoType $param.Target.Message.File.GoPkg.Path}}, len(es))
+	for i, v := range es {
+		s[i] = {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}(v)
+	}
+	{{$param.AssignableExpr "protoReq"}} = s
+{{else if $enum}}
+	{{$param.AssignableExpr "protoReq"}} = {{$enum.GoType $param.Target.Message.File.GoPkg.Path}}(e)
+{{end}}
+	{{end}}
+{{end}}
+{{if .HasQueryParam}}
+	if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}); err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+{{end}}
+{{if .Method.GetServerStreaming}}
+	// TODO
+{{else}}
+	msg, err := server.{{.Method.GetName}}(ctx, &protoReq)
+	return msg, metadata, err
+{{end}}
+}`))
+
+	localTrailerTemplate = template.Must(template.New("local-trailer").Parse(`
+{{$UseRequestContext := .UseRequestContext}}
+{{range $svc := .Services}}
+// Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Server registers the http handlers for service {{$svc.GetName}} to "mux".
+// UnaryRPC     :call {{$svc.GetName}}Server directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Server(ctx context.Context, mux *runtime.ServeMux, server {{$svc.GetName}}Server) error {
+	{{range $m := $svc.Methods}}
+	{{range $b := $m.Bindings}}
+	{{if or $m.GetClientStreaming $m.GetServerStreaming}}
+	mux.Handle({{$b.HTTPMethod | printf "%q"}}, pattern_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+		_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+		return
+	})
+	{{else}}
+	mux.Handle({{$b.HTTPMethod | printf "%q"}}, pattern_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+	{{- if $UseRequestContext }}
+		ctx, cancel := context.WithCancel(req.Context())
+	{{- else -}}
+		ctx, cancel := context.WithCancel(ctx)
+	{{- end }}
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(rctx, inboundMarshaler, server, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		{{ if $b.ResponseBody }}
+		forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, response_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}{resp}, mux.GetForwardResponseOptions()...)
+		{{ else }}
+		forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		{{end}}
+	})
+	{{end}}
+	{{end}}
+	{{end}}
+	return nil
+}
+{{end}}`))
+
+	trailerTemplate = template.Must(template.New("trailer").Parse(`
+{{$UseRequestContext := .UseRequestContext}}
+{{range $svc := .Services}}
+// Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}FromEndpoint is same as Register{{$svc.GetName}}{{$.RegisterFuncSuffix}} but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}FromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}(ctx, mux, conn)
+}
+
+// Register{{$svc.GetName}}{{$.RegisterFuncSuffix}} registers the http handlers for service {{$svc.GetName}} to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Client(ctx, mux, New{{$svc.GetName}}Client(conn))
+}
+
+// Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Client registers the http handlers for service {{$svc.GetName}}
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "{{$svc.GetName}}Client".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "{{$svc.GetName}}Client"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "{{$svc.GetName}}Client" to call the correct interceptors.
+func Register{{$svc.GetName}}{{$.RegisterFuncSuffix}}Client(ctx context.Context, mux *runtime.ServeMux, client {{$svc.GetName}}Client) error {
+	{{range $m := $svc.Methods}}
+	{{range $b := $m.Bindings}}
+	mux.Handle({{$b.HTTPMethod | printf "%q"}}, pattern_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+	{{- if $UseRequestContext }}
+		ctx, cancel := context.WithCancel(req.Context())
+	{{- else -}}
+		ctx, cancel := context.WithCancel(ctx)
+	{{- end }}
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		{{if $m.GetServerStreaming}}
+		forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+		{{else}}
+		{{ if $b.ResponseBody }}
+		forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, response_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}{resp}, mux.GetForwardResponseOptions()...)
+		{{ else }}
+		forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		{{end}}
+		{{end}}
+	})
+	{{end}}
+	{{end}}
+	return nil
+}
+
+{{range $m := $svc.Methods}}
+{{range $b := $m.Bindings}}
+{{if $b.ResponseBody}}
+type response_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} struct {
+	proto.Message
+}
+
+func (m response_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}) XXX_ResponseBody() interface{} {
+	response := m.Message.(*{{$m.ResponseType.GoType $m.Service.File.GoPkg.Path}})
+	return {{$b.ResponseBody.AssignableExpr "response"}}
+}
+{{end}}
+{{end}}
+{{end}}
+
+var (
+	{{range $m := $svc.Methods}}
+	{{range $b := $m.Bindings}}
+	pattern_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = runtime.MustPattern(runtime.NewPattern({{$b.PathTmpl.Version}}, {{$b.PathTmpl.OpCodes | printf "%#v"}}, {{$b.PathTmpl.Pool | printf "%#v"}}, {{$b.PathTmpl.Verb | printf "%q"}}, runtime.AssumeColonVerbOpt({{$.AssumeColonVerb}})))
+	{{end}}
+	{{end}}
+)
+
+var (
+	{{range $m := $svc.Methods}}
+	{{range $b := $m.Bindings}}
+	forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}} = {{if $m.GetServerStreaming}}runtime.ForwardResponseStream{{else}}runtime.ForwardResponseMessage{{end}}
+	{{end}}
+	{{end}}
+)
+{{end}}`))
+)
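
HasQueryParam and QueryParamFilter both work by subtracting the body field and the path parameters from the request message's top-level fields: whatever remains may arrive on the query string, and the excluded field paths are compiled into a utilities.DoubleArray that the generated handler passes to runtime.PopulateQueryParameters. A worked sketch under those assumptions, with a hypothetical request message:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Hypothetical UpdateBook request: fields {name, book, page_size},
	// HTTP rule body: "book", path parameter: "name".
	requestFields := []string{"name", "book", "page_size"}
	body := "book"
	pathParams := []string{"name"}

	// HasQueryParam: remove the body field and path params; anything left
	// may be populated from the query string.
	remaining := map[string]bool{}
	for _, f := range requestFields {
		remaining[f] = true
	}
	delete(remaining, body)
	for _, p := range pathParams {
		delete(remaining, p)
	}
	fmt.Println(len(remaining) > 0) // true: page_size can come from the query string

	// QueryParamFilter: the filter lists the field paths already taken by the
	// body and path parameters, so PopulateQueryParameters skips them.
	filter := utilities.NewDoubleArray([][]string{{"book"}, {"name"}})
	fmt.Println(filter.Encoding) // map[book:0 name:1]
}
```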
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/BUILD.bazel
new file mode 100644
index 0000000..89f94a1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/BUILD.bazel
@@ -0,0 +1,32 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//:generators"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "compile.go",
+        "parse.go",
+        "types.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule",
+    deps = [
+        "//utilities:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "compile_test.go",
+        "parse_test.go",
+        "types_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//utilities:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/compile.go
new file mode 100644
index 0000000..437039a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/compile.go
@@ -0,0 +1,117 @@
+package httprule
+
+import (
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+)
+
+const (
+	opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+	// Version is the version number of the format.
+	Version int
+	// OpCodes is a sequence of operations.
+	OpCodes []int
+	// Pool is a constant pool
+	Pool []string
+	// Verb is a VERB part in the template.
+	Verb string
+	// Fields is a list of field paths bound in this template.
+	Fields []string
+	// Original template (example: /v1/a_bit_of_everything)
+	Template string
+}
+
+// Compiler compiles a parsed path template into marshallable operations.
+// They can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+	Compile() Template
+}
+
+type op struct {
+	// code is the opcode of the operation
+	code utilities.OpCode
+
+	// str is a string operand of the code.
+	// num is ignored if str is not empty.
+	str string
+
+	// num is a numeric operand of the code.
+	num int
+}
+
+func (w wildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPush},
+	}
+}
+
+func (w deepWildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPushM},
+	}
+}
+
+func (l literal) compile() []op {
+	return []op{
+		{
+			code: utilities.OpLitPush,
+			str:  string(l),
+		},
+	}
+}
+
+func (v variable) compile() []op {
+	var ops []op
+	for _, s := range v.segments {
+		ops = append(ops, s.compile()...)
+	}
+	ops = append(ops, op{
+		code: utilities.OpConcatN,
+		num:  len(v.segments),
+	}, op{
+		code: utilities.OpCapture,
+		str:  v.path,
+	})
+
+	return ops
+}
+
+func (t template) Compile() Template {
+	var rawOps []op
+	for _, s := range t.segments {
+		rawOps = append(rawOps, s.compile()...)
+	}
+
+	var (
+		ops    []int
+		pool   []string
+		fields []string
+	)
+	consts := make(map[string]int)
+	for _, op := range rawOps {
+		ops = append(ops, int(op.code))
+		if op.str == "" {
+			ops = append(ops, op.num)
+		} else {
+			if _, ok := consts[op.str]; !ok {
+				consts[op.str] = len(pool)
+				pool = append(pool, op.str)
+			}
+			ops = append(ops, consts[op.str])
+		}
+		if op.code == utilities.OpCapture {
+			fields = append(fields, op.str)
+		}
+	}
+	return Template{
+		Version:  opcodeVersion,
+		OpCodes:  ops,
+		Pool:     pool,
+		Verb:     t.verb,
+		Fields:   fields,
+		Template: t.template,
+	}
+}
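
Compile flattens the parsed segments into an opcode stream plus a deduplicated constant pool, and records every OpCapture operand in Fields. A small sketch of the observable result for one example template:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule"
)

func main() {
	c, err := httprule.Parse("/v1/{name=messages/*}")
	if err != nil {
		panic(err)
	}
	tmpl := c.Compile()
	// The literal "v1", the literal "messages" and the captured field path
	// "name" each appear in the constant pool exactly once.
	fmt.Println(tmpl.Pool)   // [v1 messages name]
	fmt.Println(tmpl.Fields) // [name]
	fmt.Println(tmpl.Verb)   // "" (no :verb suffix in this template)
}
```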
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/fuzz.go
new file mode 100644
index 0000000..138f7c1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/fuzz.go
@@ -0,0 +1,11 @@
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+	_, err := Parse(string(data))
+	if err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go
new file mode 100644
index 0000000..f933cd8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go
@@ -0,0 +1,351 @@
+package httprule
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang/glog"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+	tmpl string
+	msg  string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of a path template.
+func Parse(tmpl string) (Compiler, error) {
+	if !strings.HasPrefix(tmpl, "/") {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+	}
+	tokens, verb := tokenize(tmpl[1:])
+
+	p := parser{tokens: tokens}
+	segs, err := p.topLevelSegments()
+	if err != nil {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+	}
+
+	return template{
+		segments: segs,
+		verb:     verb,
+		template: tmpl,
+	}, nil
+}
+
+func tokenize(path string) (tokens []string, verb string) {
+	if path == "" {
+		return []string{eof}, ""
+	}
+
+	const (
+		init = iota
+		field
+		nested
+	)
+	var (
+		st = init
+	)
+	for path != "" {
+		var idx int
+		switch st {
+		case init:
+			idx = strings.IndexAny(path, "/{")
+		case field:
+			idx = strings.IndexAny(path, ".=}")
+		case nested:
+			idx = strings.IndexAny(path, "/}")
+		}
+		if idx < 0 {
+			tokens = append(tokens, path)
+			break
+		}
+		switch r := path[idx]; r {
+		case '/', '.':
+		case '{':
+			st = field
+		case '=':
+			st = nested
+		case '}':
+			st = init
+		}
+		if idx == 0 {
+			tokens = append(tokens, path[idx:idx+1])
+		} else {
+			tokens = append(tokens, path[:idx], path[idx:idx+1])
+		}
+		path = path[idx+1:]
+	}
+
+	l := len(tokens)
+	t := tokens[l-1]
+	if idx := strings.LastIndex(t, ":"); idx == 0 {
+		tokens, verb = tokens[:l-1], t[1:]
+	} else if idx > 0 {
+		tokens[l-1], verb = t[:idx], t[idx+1:]
+	}
+	tokens = append(tokens, eof)
+	return tokens, verb
+}
+
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+	tokens   []string
+	accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+	glog.V(1).Infof("Parsing %q", p.tokens)
+	segs, err := p.segments()
+	if err != nil {
+		return nil, err
+	}
+	glog.V(2).Infof("accept segments: %q; %q", p.accepted, p.tokens)
+	if _, err := p.accept(typeEOF); err != nil {
+		return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+	}
+	glog.V(2).Infof("accept eof: %q; %q", p.accepted, p.tokens)
+	return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+	s, err := p.segment()
+	if err != nil {
+		return nil, err
+	}
+	glog.V(2).Infof("accept segment: %q; %q", p.accepted, p.tokens)
+
+	segs := []segment{s}
+	for {
+		if _, err := p.accept("/"); err != nil {
+			return segs, nil
+		}
+		s, err := p.segment()
+		if err != nil {
+			return segs, err
+		}
+		segs = append(segs, s)
+		glog.V(2).Infof("accept segment: %q; %q", p.accepted, p.tokens)
+	}
+}
+
+func (p *parser) segment() (segment, error) {
+	if _, err := p.accept("*"); err == nil {
+		return wildcard{}, nil
+	}
+	if _, err := p.accept("**"); err == nil {
+		return deepWildcard{}, nil
+	}
+	if l, err := p.literal(); err == nil {
+		return l, nil
+	}
+
+	v, err := p.variable()
+	if err != nil {
+		return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
+	}
+	return v, err
+}
+
+func (p *parser) literal() (segment, error) {
+	lit, err := p.accept(typeLiteral)
+	if err != nil {
+		return nil, err
+	}
+	return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+	if _, err := p.accept("{"); err != nil {
+		return nil, err
+	}
+
+	path, err := p.fieldPath()
+	if err != nil {
+		return nil, err
+	}
+
+	var segs []segment
+	if _, err := p.accept("="); err == nil {
+		segs, err = p.segments()
+		if err != nil {
+			return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
+		}
+	} else {
+		segs = []segment{wildcard{}}
+	}
+
+	if _, err := p.accept("}"); err != nil {
+		return nil, fmt.Errorf("unterminated variable segment: %s", path)
+	}
+	return variable{
+		path:     path,
+		segments: segs,
+	}, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+	c, err := p.accept(typeIdent)
+	if err != nil {
+		return "", err
+	}
+	components := []string{c}
+	for {
+		if _, err = p.accept("."); err != nil {
+			return strings.Join(components, "."), nil
+		}
+		c, err := p.accept(typeIdent)
+		if err != nil {
+			return "", fmt.Errorf("invalid field path component: %v", err)
+		}
+		components = append(components, c)
+	}
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of the valid values of termType.
+// They improve the readability of the parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+	typeIdent   = termType("ident")
+	typeLiteral = termType("literal")
+	typeEOF     = termType("$")
+)
+
+const (
+	// eof is the terminal symbol which always appears at the end of token sequence.
+	eof = "\u0000"
+)
+
+// accept tries to accept a token from "p".
+// It consumes the token and returns it if the token matches the specified "term".
+// If it does not match, accept consumes no tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+	t := p.tokens[0]
+	switch term {
+	case "/", "*", "**", ".", "=", "{", "}":
+		if t != string(term) {
+			return "", fmt.Errorf("expected %q but got %q", term, t)
+		}
+	case typeEOF:
+		if t != eof {
+			return "", fmt.Errorf("expected EOF but got %q", t)
+		}
+	case typeIdent:
+		if err := expectIdent(t); err != nil {
+			return "", err
+		}
+	case typeLiteral:
+		if err := expectPChars(t); err != nil {
+			return "", err
+		}
+	default:
+		return "", fmt.Errorf("unknown termType %q", term)
+	}
+	p.tokens = p.tokens[1:]
+	p.accepted = append(p.accepted, t)
+	return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//   pchar         = unreserved / pct-encoded / sub-delims / ":" / "@"
+//   unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//   sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
+//                 / "*" / "+" / "," / ";" / "="
+//   pct-encoded   = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+	const (
+		init = iota
+		pct1
+		pct2
+	)
+	st := init
+	for _, r := range t {
+		if st != init {
+			if !isHexDigit(r) {
+				return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+			}
+			switch st {
+			case pct1:
+				st = pct2
+			case pct2:
+				st = init
+			}
+			continue
+		}
+
+		// unreserved
+		switch {
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		}
+		switch r {
+		case '-', '.', '_', '~':
+			// unreserved
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+			// sub-delims
+		case ':', '@':
+			// rest of pchar
+		case '%':
+			// pct-encoded
+			st = pct1
+		default:
+			return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+		}
+	}
+	if st != init {
+		return fmt.Errorf("invalid percent-encoding in %q", t)
+	}
+	return nil
+}
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alnum:]_]*).
+func expectIdent(ident string) error {
+	if ident == "" {
+		return fmt.Errorf("empty identifier")
+	}
+	for pos, r := range ident {
+		switch {
+		case '0' <= r && r <= '9':
+			if pos == 0 {
+				return fmt.Errorf("identifier starting with digit: %s", ident)
+			}
+			continue
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case r == '_':
+			continue
+		default:
+			return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+		}
+	}
+	return nil
+}
+
+func isHexDigit(r rune) bool {
+	switch {
+	case '0' <= r && r <= '9':
+		return true
+	case 'A' <= r && r <= 'F':
+		return true
+	case 'a' <= r && r <= 'f':
+		return true
+	}
+	return false
+}
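
tokenize splits the template on the structural characters for the current state and peels a trailing ":verb" off the last token; accept, expectPChars and expectIdent then validate each token as the grammar is walked. A short sketch of what Parse accepts and rejects; the templates themselves are only examples.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule"
)

func main() {
	// Accepted: leading slash, pchar literals, a variable with a nested pattern,
	// and a trailing verb after the final ':'.
	if _, err := httprule.Parse("/v1/{name=shelves/*/books/*}:cancel"); err != nil {
		fmt.Println("unexpected error:", err)
	}

	// Rejected: no leading slash.
	_, err := httprule.Parse("v1/messages")
	fmt.Println(err) // no leading /: v1/messages

	// Rejected: '{' opens a variable, so the field path must be a valid identifier.
	_, err = httprule.Parse("/v1/{123}")
	fmt.Println(err != nil) // true
}
```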
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/types.go
new file mode 100644
index 0000000..5a814a0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+	"fmt"
+	"strings"
+)
+
+type template struct {
+	segments []segment
+	verb     string
+	template string
+}
+
+type segment interface {
+	fmt.Stringer
+	compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+	path     string
+	segments []segment
+}
+
+func (wildcard) String() string {
+	return "*"
+}
+
+func (deepWildcard) String() string {
+	return "**"
+}
+
+func (l literal) String() string {
+	return string(l)
+}
+
+func (v variable) String() string {
+	var segs []string
+	for _, s := range v.segments {
+		segs = append(segs, s.String())
+	}
+	return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+	var segs []string
+	for _, s := range t.segments {
+		segs = append(segs, s.String())
+	}
+	str := strings.Join(segs, "/")
+	if t.verb != "" {
+		str = fmt.Sprintf("%s:%s", str, t.verb)
+	}
+	return "/" + str
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go
new file mode 100644
index 0000000..291ba7d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go
@@ -0,0 +1,141 @@
+// Command protoc-gen-grpc-gateway is a plugin for the Google protocol buffer
+// compiler that generates a reverse proxy, which converts incoming RESTful
+// HTTP/1 requests into gRPC invocations.
+// You rarely need to run this program directly. Instead, put this program
+// into your $PATH with a name "protoc-gen-grpc-gateway" and run
+//   protoc --grpc-gateway_out=output_directory path/to/input.proto
+//
+// See README.md for more details.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/golang/protobuf/proto"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"github.com/grpc-ecosystem/grpc-gateway/codegenerator"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway"
+)
+
+var (
+	importPrefix               = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files")
+	importPath                 = flag.String("import_path", "", "used as the package if no input files declare go_package. If it contains slashes, everything up to the rightmost slash is ignored.")
+	registerFuncSuffix         = flag.String("register_func_suffix", "Handler", "used to construct names of generated Register*<Suffix> methods.")
+	useRequestContext          = flag.Bool("request_context", true, "determine whether to use http.Request's context or not")
+	allowDeleteBody            = flag.Bool("allow_delete_body", false, "unless set, HTTP DELETE methods may not have a body")
+	grpcAPIConfiguration       = flag.String("grpc_api_configuration", "", "path to gRPC API Configuration in YAML format")
+	pathType                   = flag.String("paths", "", "specifies how the paths of generated files are structured")
+	allowRepeatedFieldsInBody  = flag.Bool("allow_repeated_fields_in_body", false, "allows to use repeated field in `body` and `response_body` field of `google.api.http` annotation option")
+	repeatedPathParamSeparator = flag.String("repeated_path_param_separator", "csv", "configures how repeated fields should be split. Allowed values are `csv`, `pipes`, `ssv` and `tsv`.")
+	allowPatchFeature          = flag.Bool("allow_patch_feature", true, "determines whether to use PATCH feature involving update masks (using google.protobuf.FieldMask).")
+	allowColonFinalSegments    = flag.Bool("allow_colon_final_segments", false, "determines whether colons are permitted in the final segment of a path")
+	versionFlag                = flag.Bool("version", false, "print the current version")
+)
+
+// Variables set by goreleaser at build time
+var (
+	version = "dev"
+	commit  = "unknown"
+	date    = "unknown"
+)
+
+func main() {
+	flag.Parse()
+	defer glog.Flush()
+
+	if *versionFlag {
+		fmt.Printf("Version %v, commit %v, built at %v\n", version, commit, date)
+		os.Exit(0)
+	}
+
+	reg := descriptor.NewRegistry()
+
+	glog.V(1).Info("Parsing code generator request")
+	req, err := codegenerator.ParseRequest(os.Stdin)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	glog.V(1).Info("Parsed code generator request")
+	if req.Parameter != nil {
+		for _, p := range strings.Split(req.GetParameter(), ",") {
+			spec := strings.SplitN(p, "=", 2)
+			if len(spec) == 1 {
+				if err := flag.CommandLine.Set(spec[0], ""); err != nil {
+					glog.Fatalf("Cannot set flag %s", p)
+				}
+				continue
+			}
+			name, value := spec[0], spec[1]
+			if strings.HasPrefix(name, "M") {
+				reg.AddPkgMap(name[1:], value)
+				continue
+			}
+			if err := flag.CommandLine.Set(name, value); err != nil {
+				glog.Fatalf("Cannot set flag %s", p)
+			}
+		}
+	}
+
+	g := gengateway.New(reg, *useRequestContext, *registerFuncSuffix, *pathType, *allowPatchFeature)
+
+	if *grpcAPIConfiguration != "" {
+		if err := reg.LoadGrpcAPIServiceFromYAML(*grpcAPIConfiguration); err != nil {
+			emitError(err)
+			return
+		}
+	}
+
+	reg.SetPrefix(*importPrefix)
+	reg.SetImportPath(*importPath)
+	reg.SetAllowDeleteBody(*allowDeleteBody)
+	reg.SetAllowRepeatedFieldsInBody(*allowRepeatedFieldsInBody)
+	reg.SetAllowColonFinalSegments(*allowColonFinalSegments)
+	if err := reg.SetRepeatedPathParamSeparator(*repeatedPathParamSeparator); err != nil {
+		emitError(err)
+		return
+	}
+	if err := reg.Load(req); err != nil {
+		emitError(err)
+		return
+	}
+
+	var targets []*descriptor.File
+	for _, target := range req.FileToGenerate {
+		f, err := reg.LookupFile(target)
+		if err != nil {
+			glog.Fatal(err)
+		}
+		targets = append(targets, f)
+	}
+
+	out, err := g.Generate(targets)
+	glog.V(1).Info("Processed code generator request")
+	if err != nil {
+		emitError(err)
+		return
+	}
+	emitFiles(out)
+}
+
+func emitFiles(out []*plugin.CodeGeneratorResponse_File) {
+	emitResp(&plugin.CodeGeneratorResponse{File: out})
+}
+
+func emitError(err error) {
+	emitResp(&plugin.CodeGeneratorResponse{Error: proto.String(err.Error())})
+}
+
+func emitResp(resp *plugin.CodeGeneratorResponse) {
+	buf, err := proto.Marshal(resp)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	if _, err := os.Stdout.Write(buf); err != nil {
+		glog.Fatal(err)
+	}
+}
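+
+// Example protoc invocation (a sketch, assuming this file is built as the
+// protoc-gen-grpc-gateway plugin; the options below correspond to the flag
+// definitions above, and the exact command depends on your setup):
+//
+//   protoc -I. \
+//     --grpc-gateway_out=logtostderr=true,paths=source_relative:. \
+//     path/to/your_service.proto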
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/BUILD.bazel
new file mode 100644
index 0000000..d5a1d05
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/BUILD.bazel
@@ -0,0 +1,30 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:private"])
+
+go_library(
+    name = "go_default_library",
+    srcs = ["main.go"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
+    deps = [
+        "//codegenerator:go_default_library",
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-swagger/genswagger:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+    ],
+)
+
+go_binary(
+    name = "protoc-gen-swagger",
+    embed = [":go_default_library"],
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = ["main_test.go"],
+    embed = [":go_default_library"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/defs.bzl b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/defs.bzl
new file mode 100644
index 0000000..4f90807
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/defs.bzl
@@ -0,0 +1,154 @@
+"""Generate an open-api spec for a grpc api spec.
+
+Reads the api spec in protobuf format and generates an open-api spec.
+Optionally applies settings from the grpc-service configuration.
+"""
+
+def _collect_includes(gen_dir, srcs):
+    """Build an include path mapping.
+
+    It is important not to just collect unique dirnames but to also support
+    proto files of the same name from different packages.
+
+    The implementation below is similar to what bazel does in its
+    ProtoCompileActionBuilder.java
+    """
+    includes = []
+    for src in srcs:
+        ref_path = src.path
+
+        if ref_path.startswith(gen_dir):
+            ref_path = ref_path[len(gen_dir):].lstrip("/")
+
+        if src.owner.workspace_root:
+            workspace_root = src.owner.workspace_root
+            ref_path = ref_path[len(workspace_root):].lstrip("/")
+
+        include = ref_path + "=" + src.path
+        if include not in includes:
+            includes.append(include)
+
+    return includes
+
+def _run_proto_gen_swagger(ctx, direct_proto_srcs, transitive_proto_srcs, actions, protoc, protoc_gen_swagger, grpc_api_configuration, single_output):
+    swagger_files = []
+
+    inputs = direct_proto_srcs + transitive_proto_srcs
+    tools = [protoc_gen_swagger]
+
+    options = ["logtostderr=true", "allow_repeated_fields_in_body=true"]
+    if grpc_api_configuration:
+        options.append("grpc_api_configuration=%s" % grpc_api_configuration.path)
+        inputs.append(grpc_api_configuration)
+
+    includes = _collect_includes(ctx.genfiles_dir.path, direct_proto_srcs + transitive_proto_srcs)
+
+    if single_output:
+        swagger_file = actions.declare_file(
+            "%s.swagger.json" % ctx.attr.name,
+            sibling = direct_proto_srcs[0],
+        )
+        output_dir = ctx.bin_dir.path
+        if direct_proto_srcs[0].owner.workspace_root:
+            output_dir = "/".join([output_dir, direct_proto_srcs[0].owner.workspace_root])
+
+        output_dir = "/".join([output_dir, direct_proto_srcs[0].dirname])
+
+        options.append("allow_merge=true")
+        options.append("merge_file_name=%s" % ctx.attr.name)
+
+        args = actions.args()
+        args.add("--plugin=%s" % protoc_gen_swagger.path)
+        args.add("--swagger_out=%s:%s" % (",".join(options), output_dir))
+        args.add_all(["-I%s" % include for include in includes])
+        args.add_all([src.path for src in direct_proto_srcs])
+
+        actions.run(
+            executable = protoc,
+            inputs = inputs,
+            tools = tools,
+            outputs = [swagger_file],
+            arguments = [args],
+        )
+
+        swagger_files.append(swagger_file)
+    else:
+        for proto in direct_proto_srcs:
+            swagger_file = actions.declare_file(
+                "%s.swagger.json" % proto.basename[:-len(".proto")],
+                sibling = proto,
+            )
+
+            output_dir = ctx.bin_dir.path
+            if proto.owner.workspace_root:
+                output_dir = "/".join([output_dir, proto.owner.workspace_root])
+
+            args = actions.args()
+            args.add("--plugin=%s" % protoc_gen_swagger.path)
+            args.add("--swagger_out=%s:%s" % (",".join(options), output_dir))
+            args.add_all(["-I%s" % include for include in includes])
+            args.add(proto.path)
+
+            actions.run(
+                executable = protoc,
+                inputs = inputs,
+                tools = tools,
+                outputs = [swagger_file],
+                arguments = [args],
+            )
+
+            swagger_files.append(swagger_file)
+
+    return swagger_files
+
+def _proto_gen_swagger_impl(ctx):
+    proto = ctx.attr.proto[ProtoInfo]
+    grpc_api_configuration = ctx.file.grpc_api_configuration
+
+    return [DefaultInfo(
+        files = depset(
+            _run_proto_gen_swagger(
+                ctx,
+                direct_proto_srcs = proto.direct_sources,
+                transitive_proto_srcs = ctx.files._well_known_protos + proto.transitive_sources.to_list(),
+                actions = ctx.actions,
+                protoc = ctx.executable._protoc,
+                protoc_gen_swagger = ctx.executable._protoc_gen_swagger,
+                grpc_api_configuration = grpc_api_configuration,
+                single_output = ctx.attr.single_output,
+            ),
+        ),
+    )]
+
+protoc_gen_swagger = rule(
+    attrs = {
+        "proto": attr.label(
+            allow_rules = ["proto_library"],
+            mandatory = True,
+            providers = ["proto"],
+        ),
+        "grpc_api_configuration": attr.label(
+            allow_single_file = True,
+            mandatory = False,
+        ),
+        "single_output": attr.bool(
+            default = False,
+            mandatory = False,
+        ),
+        "_protoc": attr.label(
+            default = "@com_google_protobuf//:protoc",
+            executable = True,
+            cfg = "host",
+        ),
+        "_well_known_protos": attr.label(
+            default = "@com_google_protobuf//:well_known_protos",
+            allow_files = True,
+        ),
+        "_protoc_gen_swagger": attr.label(
+            default = Label("//protoc-gen-swagger:protoc-gen-swagger"),
+            executable = True,
+            cfg = "host",
+        ),
+    },
+    implementation = _proto_gen_swagger_impl,
+)
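+
+# Example BUILD usage (a sketch; the load label and target names are
+# illustrative and depend on how this repository is referenced in your
+# workspace):
+#
+#   load("@grpc_ecosystem_grpc_gateway//protoc-gen-swagger:defs.bzl", "protoc_gen_swagger")
+#
+#   protoc_gen_swagger(
+#       name = "echo_swagger",
+#       proto = ":echo_proto",
+#       single_output = True,
+#   )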
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/BUILD.bazel
new file mode 100644
index 0000000..929d0cf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/BUILD.bazel
@@ -0,0 +1,46 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//protoc-gen-swagger:__subpackages__"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "generator.go",
+        "template.go",
+        "types.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger",
+    deps = [
+        "//internal:go_default_library",
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-grpc-gateway/generator:go_default_library",
+        "//protoc-gen-swagger/options:go_default_library",
+        "@com_github_golang_glog//:go_default_library",
+        "@com_github_golang_protobuf//descriptor:go_default_library_gen",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
+        "@io_bazel_rules_go//proto/wkt:any_go_proto",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+        "@io_bazel_rules_go//proto/wkt:struct_go_proto",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = ["template_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//protoc-gen-grpc-gateway/descriptor:go_default_library",
+        "//protoc-gen-grpc-gateway/httprule:go_default_library",
+        "//protoc-gen-swagger/options:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@io_bazel_rules_go//proto/wkt:any_go_proto",
+        "@io_bazel_rules_go//proto/wkt:compiler_plugin_go_proto",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+        "@io_bazel_rules_go//proto/wkt:struct_go_proto",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/doc.go
new file mode 100644
index 0000000..4d28716
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/doc.go
@@ -0,0 +1,2 @@
+// Package genswagger provides a code generator for swagger.
+package genswagger
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/generator.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/generator.go
new file mode 100644
index 0000000..31409ac
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/generator.go
@@ -0,0 +1,247 @@
+package genswagger
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"github.com/golang/glog"
+	pbdescriptor "github.com/golang/protobuf/descriptor"
+	"github.com/golang/protobuf/proto"
+	protocdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	gen "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator"
+	swagger_options "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
+)
+
+var (
+	errNoTargetService = errors.New("no target service defined in the file")
+)
+
+type generator struct {
+	reg *descriptor.Registry
+}
+
+type wrapper struct {
+	fileName string
+	swagger  *swaggerObject
+}
+
+// New returns a new generator which generates swagger files.
+func New(reg *descriptor.Registry) gen.Generator {
+	return &generator{reg: reg}
+}
+
+// mergeTargetFile merges multiple swagger files (wrappers) into a single swagger file.
+func mergeTargetFile(targets []*wrapper, mergeFileName string) *wrapper {
+	var mergedTarget *wrapper
+	for _, f := range targets {
+		if mergedTarget == nil {
+			mergedTarget = &wrapper{
+				fileName: mergeFileName,
+				swagger:  f.swagger,
+			}
+		} else {
+			for k, v := range f.swagger.Definitions {
+				mergedTarget.swagger.Definitions[k] = v
+			}
+			for k, v := range f.swagger.StreamDefinitions {
+				mergedTarget.swagger.StreamDefinitions[k] = v
+			}
+			for k, v := range f.swagger.Paths {
+				mergedTarget.swagger.Paths[k] = v
+			}
+			for k, v := range f.swagger.SecurityDefinitions {
+				mergedTarget.swagger.SecurityDefinitions[k] = v
+			}
+			mergedTarget.swagger.Security = append(mergedTarget.swagger.Security, f.swagger.Security...)
+		}
+	}
+	return mergedTarget
+}
+
+func fieldName(k string) string {
+	return strings.ReplaceAll(strings.Title(k), "-", "_")
+}
+
+// Q: What's up with the alias types here?
+// A: We don't want to completely override how these structs are marshaled into
+//    JSON, we only want to add fields (see below, extensionMarshalJSON).
+//    An infinite recursion would happen if we'd call json.Marshal on the struct
+//    that has swaggerObject as an embedded field. To avoid that, we'll create
+//    type aliases, and those don't have the custom MarshalJSON methods defined
+//    on them. See http://choly.ca/post/go-json-marshalling/ (or, if it ever
+//    goes away, use
+//    https://web.archive.org/web/20190806073003/http://choly.ca/post/go-json-marshalling/).
+func (so swaggerObject) MarshalJSON() ([]byte, error) {
+	type alias swaggerObject
+	return extensionMarshalJSON(alias(so), so.extensions)
+}
+
+func (so swaggerInfoObject) MarshalJSON() ([]byte, error) {
+	type alias swaggerInfoObject
+	return extensionMarshalJSON(alias(so), so.extensions)
+}
+
+func (so swaggerSecuritySchemeObject) MarshalJSON() ([]byte, error) {
+	type alias swaggerSecuritySchemeObject
+	return extensionMarshalJSON(alias(so), so.extensions)
+}
+
+func (so swaggerOperationObject) MarshalJSON() ([]byte, error) {
+	type alias swaggerOperationObject
+	return extensionMarshalJSON(alias(so), so.extensions)
+}
+
+func (so swaggerResponseObject) MarshalJSON() ([]byte, error) {
+	type alias swaggerResponseObject
+	return extensionMarshalJSON(alias(so), so.extensions)
+}
+
+func extensionMarshalJSON(so interface{}, extensions []extension) ([]byte, error) {
+	// To append arbitrary keys to the struct we'll render into json,
+	// we're creating another struct that embeds the original one and
+	// adds the extra fields:
+	//
+	// The struct will look like
+	// struct {
+	//   *swaggerCore
+	//   XGrpcGatewayFoo json.RawMessage `json:"x-grpc-gateway-foo"`
+	//   XGrpcGatewayBar json.RawMessage `json:"x-grpc-gateway-bar"`
+	// }
+	// and thus render into what we want -- the JSON of swaggerCore with the
+	// extensions appended.
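+	//
+	// As an illustrative sketch: with an extension entry whose key is
+	// "x-grpc-gateway-foo" and whose raw JSON value is "bar", the output is
+	// the embedded object's usual JSON plus an extra "x-grpc-gateway-foo":
+	// "bar" key at the same level.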
+	fields := []reflect.StructField{
+		reflect.StructField{ // embedded
+			Name:      "Embedded",
+			Type:      reflect.TypeOf(so),
+			Anonymous: true,
+		},
+	}
+	for _, ext := range extensions {
+		fields = append(fields, reflect.StructField{
+			Name: fieldName(ext.key),
+			Type: reflect.TypeOf(ext.value),
+			Tag:  reflect.StructTag(fmt.Sprintf("json:\"%s\"", ext.key)),
+		})
+	}
+
+	t := reflect.StructOf(fields)
+	s := reflect.New(t).Elem()
+	s.Field(0).Set(reflect.ValueOf(so))
+	for _, ext := range extensions {
+		s.FieldByName(fieldName(ext.key)).Set(reflect.ValueOf(ext.value))
+	}
+	return json.Marshal(s.Interface())
+}
+
+// encodeSwagger converts a swagger file object into a plugin.CodeGeneratorResponse_File.
+func encodeSwagger(file *wrapper) (*plugin.CodeGeneratorResponse_File, error) {
+	var formatted bytes.Buffer
+	enc := json.NewEncoder(&formatted)
+	enc.SetIndent("", "  ")
+	if err := enc.Encode(*file.swagger); err != nil {
+		return nil, err
+	}
+	name := file.fileName
+	ext := filepath.Ext(name)
+	base := strings.TrimSuffix(name, ext)
+	output := fmt.Sprintf("%s.swagger.json", base)
+	return &plugin.CodeGeneratorResponse_File{
+		Name:    proto.String(output),
+		Content: proto.String(formatted.String()),
+	}, nil
+}
+
+func (g *generator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) {
+	var files []*plugin.CodeGeneratorResponse_File
+	if g.reg.IsAllowMerge() {
+		var mergedTarget *descriptor.File
+		// try to find proto leader
+		for _, f := range targets {
+			if proto.HasExtension(f.Options, swagger_options.E_Openapiv2Swagger) {
+				mergedTarget = f
+				break
+			}
+		}
+		// merge protos to leader
+		for _, f := range targets {
+			if mergedTarget == nil {
+				mergedTarget = f
+			} else {
+				mergedTarget.Enums = append(mergedTarget.Enums, f.Enums...)
+				mergedTarget.Messages = append(mergedTarget.Messages, f.Messages...)
+				mergedTarget.Services = append(mergedTarget.Services, f.Services...)
+			}
+		}
+
+		targets = nil
+		targets = append(targets, mergedTarget)
+	}
+
+	var swaggers []*wrapper
+	for _, file := range targets {
+		glog.V(1).Infof("Processing %s", file.GetName())
+		swagger, err := applyTemplate(param{File: file, reg: g.reg})
+		if err == errNoTargetService {
+			glog.V(1).Infof("%s: %v", file.GetName(), err)
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		swaggers = append(swaggers, &wrapper{
+			fileName: file.GetName(),
+			swagger:  swagger,
+		})
+	}
+
+	if g.reg.IsAllowMerge() {
+		targetSwagger := mergeTargetFile(swaggers, g.reg.GetMergeFileName())
+		f, err := encodeSwagger(targetSwagger)
+		if err != nil {
+			return nil, fmt.Errorf("failed to encode swagger for %s: %s", g.reg.GetMergeFileName(), err)
+		}
+		files = append(files, f)
+		glog.V(1).Infof("New swagger file will emit")
+	} else {
+		for _, file := range swaggers {
+			f, err := encodeSwagger(file)
+			if err != nil {
+				return nil, fmt.Errorf("failed to encode swagger for %s: %s", file.fileName, err)
+			}
+			files = append(files, f)
+			glog.V(1).Infof("New swagger file will emit")
+		}
+	}
+	return files, nil
+}
+
+// AddStreamError adds grpc.gateway.runtime.StreamError and google.protobuf.Any to the registry for stream responses.
+func AddStreamError(reg *descriptor.Registry) error {
+	// load internal protos
+	any := fileDescriptorProtoForMessage(&any.Any{})
+	streamError := fileDescriptorProtoForMessage(&internal.StreamError{})
+	if err := reg.Load(&plugin.CodeGeneratorRequest{
+		ProtoFile: []*protocdescriptor.FileDescriptorProto{
+			any,
+			streamError,
+		},
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func fileDescriptorProtoForMessage(msg pbdescriptor.Message) *protocdescriptor.FileDescriptorProto {
+	fdp, _ := pbdescriptor.ForMessage(msg)
+	fdp.SourceCodeInfo = &protocdescriptor.SourceCodeInfo{}
+	return fdp
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go
new file mode 100644
index 0000000..3d97207
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go
@@ -0,0 +1,1764 @@
+package genswagger
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/golang/glog"
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	structpb "github.com/golang/protobuf/ptypes/struct"
+	pbdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	gogen "github.com/golang/protobuf/protoc-gen-go/generator"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	swagger_options "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
+)
+
+var wktSchemas = map[string]schemaCore{
+	".google.protobuf.Timestamp": schemaCore{
+		Type:   "string",
+		Format: "date-time",
+	},
+	".google.protobuf.Duration": schemaCore{
+		Type: "string",
+	},
+	".google.protobuf.StringValue": schemaCore{
+		Type: "string",
+	},
+	".google.protobuf.BytesValue": schemaCore{
+		Type:   "string",
+		Format: "byte",
+	},
+	".google.protobuf.Int32Value": schemaCore{
+		Type:   "integer",
+		Format: "int32",
+	},
+	".google.protobuf.UInt32Value": schemaCore{
+		Type:   "integer",
+		Format: "int64",
+	},
+	".google.protobuf.Int64Value": schemaCore{
+		Type:   "string",
+		Format: "int64",
+	},
+	".google.protobuf.UInt64Value": schemaCore{
+		Type:   "string",
+		Format: "uint64",
+	},
+	".google.protobuf.FloatValue": schemaCore{
+		Type:   "number",
+		Format: "float",
+	},
+	".google.protobuf.DoubleValue": schemaCore{
+		Type:   "number",
+		Format: "double",
+	},
+	".google.protobuf.BoolValue": schemaCore{
+		Type:   "boolean",
+		Format: "boolean",
+	},
+	".google.protobuf.Empty": schemaCore{},
+	".google.protobuf.Struct": schemaCore{
+		Type: "object",
+	},
+	".google.protobuf.Value": schemaCore{
+		Type: "object",
+	},
+	".google.protobuf.ListValue": schemaCore{
+		Type: "array",
+		Items: (*swaggerItemsObject)(&schemaCore{
+			Type: "object",
+		}),
+	},
+	".google.protobuf.NullValue": schemaCore{
+		Type: "string",
+	},
+}
+
+func listEnumNames(enum *descriptor.Enum) (names []string) {
+	for _, value := range enum.GetValue() {
+		names = append(names, value.GetName())
+	}
+	return names
+}
+
+func getEnumDefault(enum *descriptor.Enum) string {
+	for _, value := range enum.GetValue() {
+		if value.GetNumber() == 0 {
+			return value.GetName()
+		}
+	}
+	return ""
+}
+
+// messageToQueryParameters converts a message to a list of swagger query parameters.
+func messageToQueryParameters(message *descriptor.Message, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) {
+	for _, field := range message.Fields {
+		p, err := queryParams(message, field, "", reg, pathParams)
+		if err != nil {
+			return nil, err
+		}
+		params = append(params, p...)
+	}
+	return params, nil
+}
+
+// queryParams converts a field to a list of swagger query parameters recursively.
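+// For example (assuming proto field names rather than JSON names are used), a
+// message field "filter" whose type contains a string field "query" is exposed
+// as a query parameter named "filter.query".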
+func queryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) {
+	// make sure the parameter is not already listed as a path parameter
+	for _, pathParam := range pathParams {
+		if pathParam.Target == field {
+			return nil, nil
+		}
+	}
+	schema := schemaOfField(field, reg, nil)
+	fieldType := field.GetTypeName()
+	if message.File != nil {
+		comments := fieldProtoComments(reg, message, field)
+		if err := updateSwaggerDataFromComments(&schema, comments, false); err != nil {
+			return nil, err
+		}
+	}
+
+	isEnum := field.GetType() == pbdescriptor.FieldDescriptorProto_TYPE_ENUM
+	items := schema.Items
+	if schema.Type != "" || isEnum {
+		if schema.Type == "object" {
+			return nil, nil // TODO: currently, mapping object in query parameter is not supported
+		}
+		if items != nil && (items.Type == "" || items.Type == "object") && !isEnum {
+			return nil, nil // TODO: currently, mapping object in query parameter is not supported
+		}
+		desc := schema.Description
+		if schema.Title != "" { // merge title because title of parameter object will be ignored
+			desc = strings.TrimSpace(schema.Title + ". " + schema.Description)
+		}
+
+		// verify if the field is required
+		required := false
+		for _, fieldName := range schema.Required {
+			if fieldName == field.GetName() {
+				required = true
+				break
+			}
+		}
+
+		param := swaggerParameterObject{
+			Description: desc,
+			In:          "query",
+			Default:     schema.Default,
+			Type:        schema.Type,
+			Items:       schema.Items,
+			Format:      schema.Format,
+			Required:    required,
+		}
+		if param.Type == "array" {
+			param.CollectionFormat = "multi"
+		}
+
+		if reg.GetUseJSONNamesForFields() {
+			param.Name = prefix + field.GetJsonName()
+		} else {
+			param.Name = prefix + field.GetName()
+		}
+
+		if isEnum {
+			enum, err := reg.LookupEnum("", fieldType)
+			if err != nil {
+				return nil, fmt.Errorf("unknown enum type %s", fieldType)
+			}
+			if items != nil { // array
+				param.Items = &swaggerItemsObject{
+					Type: "string",
+					Enum: listEnumNames(enum),
+				}
+			} else {
+				param.Type = "string"
+				param.Enum = listEnumNames(enum)
+				param.Default = getEnumDefault(enum)
+			}
+			valueComments := enumValueProtoComments(reg, enum)
+			if valueComments != "" {
+				param.Description = strings.TrimLeft(param.Description+"\n\n "+valueComments, "\n")
+			}
+		}
+		return []swaggerParameterObject{param}, nil
+	}
+
+	// nested type, recurse
+	msg, err := reg.LookupMsg("", fieldType)
+	if err != nil {
+		return nil, fmt.Errorf("unknown message type %s", fieldType)
+	}
+	for _, nestedField := range msg.Fields {
+		p, err := queryParams(msg, nestedField, prefix+field.GetName()+".", reg, pathParams)
+		if err != nil {
+			return nil, err
+		}
+		params = append(params, p...)
+	}
+	return params, nil
+}
+
+// findServicesMessagesAndEnumerations discovers all messages and enums defined in the RPC methods of the service.
+func findServicesMessagesAndEnumerations(s []*descriptor.Service, reg *descriptor.Registry, m messageMap, ms messageMap, e enumMap, refs refMap) {
+	for _, svc := range s {
+		for _, meth := range svc.Methods {
+			// Request may be fully included in query
+			if _, ok := refs[fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg))]; ok {
+				if !skipRenderingRef(meth.RequestType.FQMN()) {
+					m[fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg)] = meth.RequestType
+				}
+			}
+			findNestedMessagesAndEnumerations(meth.RequestType, reg, m, e)
+
+			if !skipRenderingRef(meth.ResponseType.FQMN()) {
+				m[fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg)] = meth.ResponseType
+				if meth.GetServerStreaming() {
+					runtimeStreamError := fullyQualifiedNameToSwaggerName(".grpc.gateway.runtime.StreamError", reg)
+					glog.V(1).Infof("StreamError FQMN: %s", runtimeStreamError)
+					streamError, err := reg.LookupMsg(".grpc.gateway.runtime", "StreamError")
+					if err == nil {
+						glog.V(1).Infof("StreamError: %v", streamError)
+						m[runtimeStreamError] = streamError
+						findNestedMessagesAndEnumerations(streamError, reg, m, e)
+					} else {
+						// just in case there is an error looking up StreamError
+						glog.Error(err)
+					}
+					ms[fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg)] = meth.ResponseType
+				}
+			}
+			findNestedMessagesAndEnumerations(meth.ResponseType, reg, m, e)
+		}
+	}
+}
+
+// findNestedMessagesAndEnumerations finds all nested messages and enumerations that can be generated by the services.
+func findNestedMessagesAndEnumerations(message *descriptor.Message, reg *descriptor.Registry, m messageMap, e enumMap) {
+	// Iterate over all the fields of the message.
+	for _, t := range message.Fields {
+		fieldType := t.GetTypeName()
+		// If the type is an empty string then it is a proto primitive
+		if fieldType != "" {
+			if _, ok := m[fieldType]; !ok {
+				msg, err := reg.LookupMsg("", fieldType)
+				if err != nil {
+					enum, err := reg.LookupEnum("", fieldType)
+					if err != nil {
+						panic(err)
+					}
+					e[fieldType] = enum
+					continue
+				}
+				m[fieldType] = msg
+				findNestedMessagesAndEnumerations(msg, reg, m, e)
+			}
+		}
+	}
+}
+
+func skipRenderingRef(refName string) bool {
+	_, ok := wktSchemas[refName]
+	return ok
+}
+
+func renderMessagesAsDefinition(messages messageMap, d swaggerDefinitionsObject, reg *descriptor.Registry, customRefs refMap) {
+	for name, msg := range messages {
+		if skipRenderingRef(name) {
+			continue
+		}
+
+		if opt := msg.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry {
+			continue
+		}
+		schema := swaggerSchemaObject{
+			schemaCore: schemaCore{
+				Type: "object",
+			},
+		}
+		msgComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index))
+		if err := updateSwaggerDataFromComments(&schema, msgComments, false); err != nil {
+			panic(err)
+		}
+		opts, err := extractSchemaOptionFromMessageDescriptor(msg.DescriptorProto)
+		if err != nil {
+			panic(err)
+		}
+		if opts != nil {
+			protoSchema := swaggerSchemaFromProtoSchema(opts, reg, customRefs)
+
+			// Warning: Make sure not to overwrite any fields already set on the schema type.
+			schema.ExternalDocs = protoSchema.ExternalDocs
+			schema.ReadOnly = protoSchema.ReadOnly
+			schema.MultipleOf = protoSchema.MultipleOf
+			schema.Maximum = protoSchema.Maximum
+			schema.ExclusiveMaximum = protoSchema.ExclusiveMaximum
+			schema.Minimum = protoSchema.Minimum
+			schema.ExclusiveMinimum = protoSchema.ExclusiveMinimum
+			schema.MaxLength = protoSchema.MaxLength
+			schema.MinLength = protoSchema.MinLength
+			schema.Pattern = protoSchema.Pattern
+			schema.Default = protoSchema.Default
+			schema.MaxItems = protoSchema.MaxItems
+			schema.MinItems = protoSchema.MinItems
+			schema.UniqueItems = protoSchema.UniqueItems
+			schema.MaxProperties = protoSchema.MaxProperties
+			schema.MinProperties = protoSchema.MinProperties
+			schema.Required = protoSchema.Required
+			if protoSchema.schemaCore.Type != "" || protoSchema.schemaCore.Ref != "" {
+				schema.schemaCore = protoSchema.schemaCore
+			}
+			if protoSchema.Title != "" {
+				schema.Title = protoSchema.Title
+			}
+			if protoSchema.Description != "" {
+				schema.Description = protoSchema.Description
+			}
+			if protoSchema.Example != nil {
+				schema.Example = protoSchema.Example
+			}
+		}
+
+		for _, f := range msg.Fields {
+			fieldValue := schemaOfField(f, reg, customRefs)
+			comments := fieldProtoComments(reg, msg, f)
+			if err := updateSwaggerDataFromComments(&fieldValue, comments, false); err != nil {
+				panic(err)
+			}
+
+			kv := keyVal{Value: fieldValue}
+			if reg.GetUseJSONNamesForFields() {
+				kv.Key = f.GetJsonName()
+			} else {
+				kv.Key = f.GetName()
+			}
+			if schema.Properties == nil {
+				schema.Properties = &swaggerSchemaObjectProperties{}
+			}
+			*schema.Properties = append(*schema.Properties, kv)
+		}
+		d[fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)] = schema
+	}
+}
+
+func renderMessagesAsStreamDefinition(messages messageMap, d swaggerDefinitionsObject, reg *descriptor.Registry) {
+	for name, msg := range messages {
+		if skipRenderingRef(name) {
+			continue
+		}
+
+		if opt := msg.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry {
+			continue
+		}
+		d[fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)] = swaggerSchemaObject{
+			schemaCore: schemaCore{
+				Type: "object",
+			},
+			Title: fmt.Sprintf("Stream result of %s", fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)),
+			Properties: &swaggerSchemaObjectProperties{
+				keyVal{
+					Key: "result",
+					Value: swaggerSchemaObject{
+						schemaCore: schemaCore{
+							Ref: fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)),
+						},
+					},
+				},
+				keyVal{
+					Key: "error",
+					Value: swaggerSchemaObject{
+						schemaCore: schemaCore{
+							Ref: fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(".grpc.gateway.runtime.StreamError", reg)),
+						},
+					},
+				},
+			},
+		}
+	}
+}
+
+// schemaOfField returns a swagger Schema Object for a protobuf field.
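+// As an illustrative sketch: a repeated string field maps to
+// {"type": "array", "items": {"type": "string"}}, while a map field maps to an
+// object schema whose additionalProperties describe the value type.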
+func schemaOfField(f *descriptor.Field, reg *descriptor.Registry, refs refMap) swaggerSchemaObject {
+	const (
+		singular = 0
+		array    = 1
+		object   = 2
+	)
+	var (
+		core      schemaCore
+		aggregate int
+	)
+
+	fd := f.FieldDescriptorProto
+	if m, err := reg.LookupMsg("", f.GetTypeName()); err == nil {
+		if opt := m.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry {
+			fd = m.GetField()[1]
+			aggregate = object
+		}
+	}
+	if fd.GetLabel() == pbdescriptor.FieldDescriptorProto_LABEL_REPEATED {
+		aggregate = array
+	}
+
+	var props *swaggerSchemaObjectProperties
+
+	switch ft := fd.GetType(); ft {
+	case pbdescriptor.FieldDescriptorProto_TYPE_ENUM, pbdescriptor.FieldDescriptorProto_TYPE_MESSAGE, pbdescriptor.FieldDescriptorProto_TYPE_GROUP:
+		if wktSchema, ok := wktSchemas[fd.GetTypeName()]; ok {
+			core = wktSchema
+
+			if fd.GetTypeName() == ".google.protobuf.Empty" {
+				props = &swaggerSchemaObjectProperties{}
+			}
+		} else {
+			core = schemaCore{
+				Ref: "#/definitions/" + fullyQualifiedNameToSwaggerName(fd.GetTypeName(), reg),
+			}
+			if refs != nil {
+				refs[fd.GetTypeName()] = struct{}{}
+			}
+		}
+	default:
+		ftype, format, ok := primitiveSchema(ft)
+		if ok {
+			core = schemaCore{Type: ftype, Format: format}
+		} else {
+			core = schemaCore{Type: ft.String(), Format: "UNKNOWN"}
+		}
+	}
+
+	ret := swaggerSchemaObject{}
+
+	switch aggregate {
+	case array:
+		ret = swaggerSchemaObject{
+			schemaCore: schemaCore{
+				Type:  "array",
+				Items: (*swaggerItemsObject)(&core),
+			},
+		}
+	case object:
+		ret = swaggerSchemaObject{
+			schemaCore: schemaCore{
+				Type: "object",
+			},
+			AdditionalProperties: &swaggerSchemaObject{Properties: props, schemaCore: core},
+		}
+	default:
+		ret = swaggerSchemaObject{
+			schemaCore: core,
+			Properties: props,
+		}
+	}
+
+	if j, err := extractJSONSchemaFromFieldDescriptor(fd); err == nil {
+		updateSwaggerObjectFromJSONSchema(&ret, j)
+	}
+
+	return ret
+}
+
+// primitiveSchema returns a pair of "Type" and "Format" in JSON Schema for
+// the given primitive field type.
+// The last return parameter is true iff the field type is actually primitive.
+func primitiveSchema(t pbdescriptor.FieldDescriptorProto_Type) (ftype, format string, ok bool) {
+	switch t {
+	case pbdescriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return "number", "double", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return "number", "float", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_INT64:
+		return "string", "int64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_UINT64:
+		// 64bit integer types are marshaled as string in the default JSONPb marshaler.
+		// TODO(yugui) Add an option to declare 64bit integers as int64.
+		//
+		// NOTE: uint64 is not a predefined format of the integer type in the Swagger spec,
+		// so we cannot expect uint64 to be commonly supported by swagger processors.
+		return "string", "uint64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_INT32:
+		return "integer", "int32", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_FIXED64:
+		// Ditto.
+		return "string", "uint64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_FIXED32:
+		// Ditto.
+		return "integer", "int64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "boolean", "boolean", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_STRING:
+		// NOTE: in the swagger specification, format should be empty on string type
+		return "string", "", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "string", "byte", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_UINT32:
+		// Ditto.
+		return "integer", "int64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		return "integer", "int32", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return "string", "int64", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_SINT32:
+		return "integer", "int32", true
+	case pbdescriptor.FieldDescriptorProto_TYPE_SINT64:
+		return "string", "int64", true
+	default:
+		return "", "", false
+	}
+}
+
+// renderEnumerationsAsDefinition inserts enums into the definitions object.
+func renderEnumerationsAsDefinition(enums enumMap, d swaggerDefinitionsObject, reg *descriptor.Registry) {
+	for _, enum := range enums {
+		enumComments := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index))
+
+		// it may be necessary to sort the result of the GetValue function.
+		enumNames := listEnumNames(enum)
+		defaultValue := getEnumDefault(enum)
+		valueComments := enumValueProtoComments(reg, enum)
+		if valueComments != "" {
+			enumComments = strings.TrimLeft(enumComments+"\n\n "+valueComments, "\n")
+		}
+		enumSchemaObject := swaggerSchemaObject{
+			schemaCore: schemaCore{
+				Type:    "string",
+				Enum:    enumNames,
+				Default: defaultValue,
+			},
+		}
+		if err := updateSwaggerDataFromComments(&enumSchemaObject, enumComments, false); err != nil {
+			panic(err)
+		}
+
+		d[fullyQualifiedNameToSwaggerName(enum.FQEN(), reg)] = enumSchemaObject
+	}
+}
+
+// Take in an FQMN or FQEN and return a swagger-safe version of the name.
+func fullyQualifiedNameToSwaggerName(fqn string, reg *descriptor.Registry) string {
+	registriesSeenMutex.Lock()
+	defer registriesSeenMutex.Unlock()
+	if mapping, present := registriesSeen[reg]; present {
+		return mapping[fqn]
+	}
+	mapping := resolveFullyQualifiedNameToSwaggerNames(append(reg.GetAllFQMNs(), reg.GetAllFQENs()...), reg.GetUseFQNForSwaggerName())
+	registriesSeen[reg] = mapping
+	return mapping[fqn]
+}
+
+// registriesSeen is used to memoise calls to resolveFullyQualifiedNameToSwaggerNames so
+// we don't repeat it unnecessarily, since it can take some time.
+var registriesSeen = map[*descriptor.Registry]map[string]string{}
+var registriesSeenMutex sync.Mutex
+
+// Take the names of every proto and "uniq-ify" them. The idea is to produce a
+// set of names that meet a couple of conditions. They must be stable, they
+// must be unique, and they must be shorter than the FQN.
+//
+// This likely could be made better. This will always generate the same names
+// but may not always produce optimal names. This is a reasonably close
+// approximation of what they should look like in most cases.
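+//
+// For example (a sketch of the default, non-FQN behaviour): ".foo.bar.Baz"
+// resolves to "barBaz" when no other registered name ends in "Baz"; if another
+// package also defines "Baz", more of the package path is folded in until the
+// suffix becomes unique.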
+func resolveFullyQualifiedNameToSwaggerNames(messages []string, useFQNForSwaggerName bool) map[string]string {
+	packagesByDepth := make(map[int][][]string)
+	uniqueNames := make(map[string]string)
+
+	hierarchy := func(pkg string) []string {
+		return strings.Split(pkg, ".")
+	}
+
+	for _, p := range messages {
+		h := hierarchy(p)
+		for depth := range h {
+			if _, ok := packagesByDepth[depth]; !ok {
+				packagesByDepth[depth] = make([][]string, 0)
+			}
+			packagesByDepth[depth] = append(packagesByDepth[depth], h[len(h)-depth:])
+		}
+	}
+
+	count := func(list [][]string, item []string) int {
+		i := 0
+		for _, element := range list {
+			if reflect.DeepEqual(element, item) {
+				i++
+			}
+		}
+		return i
+	}
+
+	for _, p := range messages {
+		if useFQNForSwaggerName {
+			// strip leading dot from proto fqn
+			uniqueNames[p] = p[1:]
+		} else {
+			h := hierarchy(p)
+			for depth := 0; depth < len(h); depth++ {
+				if count(packagesByDepth[depth], h[len(h)-depth:]) == 1 {
+					uniqueNames[p] = strings.Join(h[len(h)-depth-1:], "")
+					break
+				}
+				if depth == len(h)-1 {
+					uniqueNames[p] = strings.Join(h, "")
+				}
+			}
+		}
+	}
+	return uniqueNames
+}
+
+var canRegexp = regexp.MustCompile("{([a-zA-Z][a-zA-Z0-9_.]*).*}")
+
+// Swagger expects paths of the form /path/{string_value} but grpc-gateway paths are expected to be of the form /path/{string_value=strprefix/*}. This should reformat it correctly.
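+// For example (assuming default registry settings), "/v1/{string_value=strprefix/*}/foo"
+// becomes "/v1/{string_value}/foo"; path segments whose variable is a resource
+// name such as "parent" or "name" are left untouched (see isResourceName below).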
+func templateToSwaggerPath(path string, reg *descriptor.Registry) string {
+	// It seems like the right thing to do here is to just use
+	// strings.Split(path, "/") but that breaks badly when you hit a url like
+	// /{my_field=prefix/*}/ and end up with 2 sections representing my_field.
+	// Instead do the right thing and write a small pushdown (counter) automaton
+	// for it.
+	var parts []string
+	depth := 0
+	buffer := ""
+	jsonBuffer := ""
+	for _, char := range path {
+		switch char {
+		case '{':
+			// Push on the stack
+			depth++
+			buffer += string(char)
+			jsonBuffer = ""
+			jsonBuffer += string(char)
+			break
+		case '}':
+			if depth == 0 {
+				panic("Encountered } without matching { before it.")
+			}
+			// Pop from the stack
+			depth--
+			buffer += string(char)
+			if reg.GetUseJSONNamesForFields() &&
+				len(jsonBuffer) > 1 {
+				jsonSnakeCaseName := string(jsonBuffer[1:])
+				jsonCamelCaseName := string(lowerCamelCase(jsonSnakeCaseName))
+				prev := string(buffer[:len(buffer)-len(jsonSnakeCaseName)-2])
+				buffer = strings.Join([]string{prev, "{", jsonCamelCaseName, "}"}, "")
+				jsonBuffer = ""
+			}
+		case '/':
+			if depth == 0 {
+				parts = append(parts, buffer)
+				buffer = ""
+				// Since the stack was empty when we hit the '/' we are done with this
+				// section.
+				continue
+			}
+			buffer += string(char)
+			jsonBuffer += string(char)
+		default:
+			buffer += string(char)
+			jsonBuffer += string(char)
+			break
+		}
+	}
+
+	// Now append the last element to parts
+	parts = append(parts, buffer)
+
+	// Parts is now an array of segments of the path. Interestingly, the syntax
+	// for this subsection CAN be handled by a regexp, since it has no memory.
+	for index, part := range parts {
+		// If part is a resource name such as "parent", "name", "user.name", the format info must be retained.
+		prefix := canRegexp.ReplaceAllString(part, "$1")
+		if isResourceName(prefix) {
+			continue
+		}
+		parts[index] = canRegexp.ReplaceAllString(part, "{$1}")
+	}
+
+	return strings.Join(parts, "/")
+}
+
+func isResourceName(prefix string) bool {
+	words := strings.Split(prefix, ".")
+	l := len(words)
+	field := words[l-1]
+	words = strings.Split(field, ":")
+	field = words[0]
+	return field == "parent" || field == "name"
+}
+
+func renderServices(services []*descriptor.Service, paths swaggerPathsObject, reg *descriptor.Registry, requestResponseRefs, customRefs refMap) error {
+	// Correctness of svcIdx and methIdx depends on 'services' containing the services in the same order as the 'file.Service' array.
+	for svcIdx, svc := range services {
+		for methIdx, meth := range svc.Methods {
+			for bIdx, b := range meth.Bindings {
+				// Iterate over all the swagger parameters
+				parameters := swaggerParametersObject{}
+				for _, parameter := range b.PathParams {
+
+					var paramType, paramFormat, desc, collectionFormat, defaultValue string
+					var enumNames []string
+					var items *swaggerItemsObject
+					var minItems *int
+					switch pt := parameter.Target.GetType(); pt {
+					case pbdescriptor.FieldDescriptorProto_TYPE_GROUP, pbdescriptor.FieldDescriptorProto_TYPE_MESSAGE:
+						if descriptor.IsWellKnownType(parameter.Target.GetTypeName()) {
+							if parameter.IsRepeated() {
+								return fmt.Errorf("only primitive and enum types are allowed in repeated path parameters")
+							}
+							schema := schemaOfField(parameter.Target, reg, customRefs)
+							paramType = schema.Type
+							paramFormat = schema.Format
+							desc = schema.Description
+							defaultValue = schema.Default
+						} else {
+							return fmt.Errorf("only primitive and well-known types are allowed in path parameters")
+						}
+					case pbdescriptor.FieldDescriptorProto_TYPE_ENUM:
+						paramType = "string"
+						paramFormat = ""
+						enum, err := reg.LookupEnum("", parameter.Target.GetTypeName())
+						if err != nil {
+							return err
+						}
+						enumNames = listEnumNames(enum)
+						schema := schemaOfField(parameter.Target, reg, customRefs)
+						desc = schema.Description
+						defaultValue = schema.Default
+					default:
+						var ok bool
+						paramType, paramFormat, ok = primitiveSchema(pt)
+						if !ok {
+							return fmt.Errorf("unknown field type %v", pt)
+						}
+
+						schema := schemaOfField(parameter.Target, reg, customRefs)
+						desc = schema.Description
+						defaultValue = schema.Default
+					}
+
+					if parameter.IsRepeated() {
+						core := schemaCore{Type: paramType, Format: paramFormat}
+						if parameter.IsEnum() {
+							var s []string
+							core.Enum = enumNames
+							enumNames = s
+						}
+						items = (*swaggerItemsObject)(&core)
+						paramType = "array"
+						paramFormat = ""
+						collectionFormat = reg.GetRepeatedPathParamSeparatorName()
+						minItems = new(int)
+						*minItems = 1
+					}
+
+					if desc == "" {
+						desc = fieldProtoComments(reg, parameter.Target.Message, parameter.Target)
+					}
+					parameterString := parameter.String()
+					if reg.GetUseJSONNamesForFields() {
+						parameterString = lowerCamelCase(parameterString)
+					}
+					parameters = append(parameters, swaggerParameterObject{
+						Name:        parameterString,
+						Description: desc,
+						In:          "path",
+						Required:    true,
+						Default:     defaultValue,
+						// Parameters in gRPC-Gateway can only be strings?
+						Type:             paramType,
+						Format:           paramFormat,
+						Enum:             enumNames,
+						Items:            items,
+						CollectionFormat: collectionFormat,
+						MinItems:         minItems,
+					})
+				}
+				// Now check if there is a body parameter
+				if b.Body != nil {
+					var schema swaggerSchemaObject
+					desc := ""
+
+					if len(b.Body.FieldPath) == 0 {
+						schema = swaggerSchemaObject{
+							schemaCore: schemaCore{},
+						}
+
+						wknSchemaCore, isWkn := wktSchemas[meth.RequestType.FQMN()]
+						if !isWkn {
+							schema.Ref = fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg))
+						} else {
+							schema.schemaCore = wknSchemaCore
+
+							// Special workaround for Empty: it's a well-known type, but wktSchemas only provides a schemaCore; we also need to set schema.Properties, which is a level higher.
+							if meth.RequestType.FQMN() == ".google.protobuf.Empty" {
+								schema.Properties = &swaggerSchemaObjectProperties{}
+							}
+						}
+					} else {
+						lastField := b.Body.FieldPath[len(b.Body.FieldPath)-1]
+						schema = schemaOfField(lastField.Target, reg, customRefs)
+						if schema.Description != "" {
+							desc = schema.Description
+						} else {
+							desc = fieldProtoComments(reg, lastField.Target.Message, lastField.Target)
+						}
+					}
+
+					if meth.GetClientStreaming() {
+						desc += " (streaming inputs)"
+					}
+					parameters = append(parameters, swaggerParameterObject{
+						Name:        "body",
+						Description: desc,
+						In:          "body",
+						Required:    true,
+						Schema:      &schema,
+					})
+				} else if b.HTTPMethod == "GET" || b.HTTPMethod == "DELETE" {
+					// add the parameters to the query string
+					queryParams, err := messageToQueryParameters(meth.RequestType, reg, b.PathParams)
+					if err != nil {
+						return err
+					}
+					parameters = append(parameters, queryParams...)
+				}
+
+				pathItemObject, ok := paths[templateToSwaggerPath(b.PathTmpl.Template, reg)]
+				if !ok {
+					pathItemObject = swaggerPathItemObject{}
+				}
+
+				methProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.ServiceDescriptorProto)(nil)), "Method")
+				desc := "A successful response."
+				var responseSchema swaggerSchemaObject
+
+				if b.ResponseBody == nil || len(b.ResponseBody.FieldPath) == 0 {
+					responseSchema = swaggerSchemaObject{
+						schemaCore: schemaCore{},
+					}
+
+					// Don't link to a full definition for
+					// empty; it's overly verbose.
+					// schema.Properties{} renders it as
+					// well, without a definition
+					wknSchemaCore, isWkn := wktSchemas[meth.ResponseType.FQMN()]
+					if !isWkn {
+						responseSchema.Ref = fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg))
+					} else {
+						responseSchema.schemaCore = wknSchemaCore
+
+						// Special workaround for Empty: it's a well-known type, but wktSchemas only provides a schemaCore; we also need to set schema.Properties, which is a level higher.
+						if meth.ResponseType.FQMN() == ".google.protobuf.Empty" {
+							responseSchema.Properties = &swaggerSchemaObjectProperties{}
+						}
+					}
+				} else {
+					// This is resolving the value of response_body in the google.api.HttpRule
+					lastField := b.ResponseBody.FieldPath[len(b.ResponseBody.FieldPath)-1]
+					responseSchema = schemaOfField(lastField.Target, reg, customRefs)
+					if responseSchema.Description != "" {
+						desc = responseSchema.Description
+					} else {
+						desc = fieldProtoComments(reg, lastField.Target.Message, lastField.Target)
+					}
+				}
+				if meth.GetServerStreaming() {
+					desc += " (streaming responses)"
+					// Use the stream definition which wraps the message in a "result"
+					responseSchema.Ref = strings.Replace(responseSchema.Ref, `#/definitions/`, `#/x-stream-definitions/`, 1)
+				}
+
+				tag := svc.GetName()
+				if pkg := svc.File.GetPackage(); pkg != "" && reg.IsIncludePackageInTags() {
+					tag = pkg + "." + tag
+				}
+
+				operationObject := &swaggerOperationObject{
+					Tags:       []string{tag},
+					Parameters: parameters,
+					Responses: swaggerResponsesObject{
+						"200": swaggerResponseObject{
+							Description: desc,
+							Schema:      responseSchema,
+						},
+					},
+				}
+				if bIdx == 0 {
+					operationObject.OperationID = meth.GetName()
+				} else {
+					// OperationID must be unique in an OpenAPI v2 definition.
+					operationObject.OperationID = fmt.Sprintf("%s%d", meth.GetName(), bIdx+1)
+				}
+
+				// Fill reference map with referenced request messages
+				for _, param := range operationObject.Parameters {
+					if param.Schema != nil && param.Schema.Ref != "" {
+						requestResponseRefs[param.Schema.Ref] = struct{}{}
+					}
+				}
+
+				methComments := protoComments(reg, svc.File, nil, "Method", int32(svcIdx), methProtoPath, int32(methIdx))
+				if err := updateSwaggerDataFromComments(operationObject, methComments, false); err != nil {
+					panic(err)
+				}
+
+				opts, err := extractOperationOptionFromMethodDescriptor(meth.MethodDescriptorProto)
+				if opts != nil {
+					if err != nil {
+						panic(err)
+					}
+					operationObject.ExternalDocs = protoExternalDocumentationToSwaggerExternalDocumentation(opts.ExternalDocs)
+					// TODO(ivucica): this would be better supported by checking whether the method is deprecated in the proto file
+					operationObject.Deprecated = opts.Deprecated
+
+					if opts.Summary != "" {
+						operationObject.Summary = opts.Summary
+					}
+					if opts.Description != "" {
+						operationObject.Description = opts.Description
+					}
+					if len(opts.Tags) > 0 {
+						operationObject.Tags = make([]string, len(opts.Tags))
+						copy(operationObject.Tags, opts.Tags)
+					}
+					if opts.Security != nil {
+						newSecurity := []swaggerSecurityRequirementObject{}
+						if operationObject.Security != nil {
+							newSecurity = *operationObject.Security
+						}
+						for _, secReq := range opts.Security {
+							newSecReq := swaggerSecurityRequirementObject{}
+							for secReqKey, secReqValue := range secReq.SecurityRequirement {
+								if secReqValue == nil {
+									continue
+								}
+
+								newSecReqValue := make([]string, len(secReqValue.Scope))
+								copy(newSecReqValue, secReqValue.Scope)
+								newSecReq[secReqKey] = newSecReqValue
+							}
+
+							if len(newSecReq) > 0 {
+								newSecurity = append(newSecurity, newSecReq)
+							}
+						}
+						operationObject.Security = &newSecurity
+					}
+					if opts.Responses != nil {
+						for name, resp := range opts.Responses {
+							respObj := swaggerResponseObject{
+								Description: resp.Description,
+								Schema:      swaggerSchemaFromProtoSchema(resp.Schema, reg, customRefs),
+							}
+							if resp.Extensions != nil {
+								exts, err := processExtensions(resp.Extensions)
+								if err != nil {
+									return err
+								}
+								respObj.extensions = exts
+							}
+							operationObject.Responses[name] = respObj
+						}
+					}
+
+					if opts.Extensions != nil {
+						exts, err := processExtensions(opts.Extensions)
+						if err != nil {
+							return err
+						}
+						operationObject.extensions = exts
+					}
+
+					// TODO(ivucica): add remaining fields of operation object
+				}
+
+				switch b.HTTPMethod {
+				case "DELETE":
+					pathItemObject.Delete = operationObject
+					break
+				case "GET":
+					pathItemObject.Get = operationObject
+					break
+				case "POST":
+					pathItemObject.Post = operationObject
+					break
+				case "PUT":
+					pathItemObject.Put = operationObject
+					break
+				case "PATCH":
+					pathItemObject.Patch = operationObject
+					break
+				}
+				paths[templateToSwaggerPath(b.PathTmpl.Template, reg)] = pathItemObject
+			}
+		}
+	}
+
+	// Success! Return nil as the error.
+	return nil
+}
+
+// This function is called with a param which contains the entire definition of a method.
+func applyTemplate(p param) (*swaggerObject, error) {
+	// Create the basic template object. This is the object that everything is
+	// defined off of.
+	s := swaggerObject{
+		// Swagger 2.0 is the version of this document
+		Swagger:           "2.0",
+		Schemes:           []string{"http", "https"},
+		Consumes:          []string{"application/json"},
+		Produces:          []string{"application/json"},
+		Paths:             make(swaggerPathsObject),
+		Definitions:       make(swaggerDefinitionsObject),
+		StreamDefinitions: make(swaggerDefinitionsObject),
+		Info: swaggerInfoObject{
+			Title:   *p.File.Name,
+			Version: "version not set",
+		},
+	}
+
+	// Loops through all the services and their exposed GET/POST/PUT/DELETE definitions
+	// and creates entries for all of them.
+	// Also adds custom user-specified references to the second map.
+	requestResponseRefs, customRefs := refMap{}, refMap{}
+	if err := renderServices(p.Services, s.Paths, p.reg, requestResponseRefs, customRefs); err != nil {
+		panic(err)
+	}
+
+	// Find all the service's messages and enumerations that are defined (recursively)
+	// and write request, response and other custom (but referenced) types out as definition objects.
+	m := messageMap{}
+	ms := messageMap{}
+	e := enumMap{}
+	findServicesMessagesAndEnumerations(p.Services, p.reg, m, ms, e, requestResponseRefs)
+	renderMessagesAsDefinition(m, s.Definitions, p.reg, customRefs)
+	renderMessagesAsStreamDefinition(ms, s.StreamDefinitions, p.reg)
+	renderEnumerationsAsDefinition(e, s.Definitions, p.reg)
+
+	// File itself might have some comments and metadata.
+	packageProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Package")
+	packageComments := protoComments(p.reg, p.File, nil, "Package", packageProtoPath)
+	if err := updateSwaggerDataFromComments(&s, packageComments, true); err != nil {
+		panic(err)
+	}
+
+	// There may be additional options in the swagger option in the proto.
+	spb, err := extractSwaggerOptionFromFileDescriptor(p.FileDescriptorProto)
+	if err != nil {
+		panic(err)
+	}
+	if spb != nil {
+		if spb.Swagger != "" {
+			s.Swagger = spb.Swagger
+		}
+		if spb.Info != nil {
+			if spb.Info.Title != "" {
+				s.Info.Title = spb.Info.Title
+			}
+			if spb.Info.Description != "" {
+				s.Info.Description = spb.Info.Description
+			}
+			if spb.Info.TermsOfService != "" {
+				s.Info.TermsOfService = spb.Info.TermsOfService
+			}
+			if spb.Info.Version != "" {
+				s.Info.Version = spb.Info.Version
+			}
+			if spb.Info.Contact != nil {
+				if s.Info.Contact == nil {
+					s.Info.Contact = &swaggerContactObject{}
+				}
+				if spb.Info.Contact.Name != "" {
+					s.Info.Contact.Name = spb.Info.Contact.Name
+				}
+				if spb.Info.Contact.Url != "" {
+					s.Info.Contact.URL = spb.Info.Contact.Url
+				}
+				if spb.Info.Contact.Email != "" {
+					s.Info.Contact.Email = spb.Info.Contact.Email
+				}
+			}
+			if spb.Info.License != nil {
+				if s.Info.License == nil {
+					s.Info.License = &swaggerLicenseObject{}
+				}
+				if spb.Info.License.Name != "" {
+					s.Info.License.Name = spb.Info.License.Name
+				}
+				if spb.Info.License.Url != "" {
+					s.Info.License.URL = spb.Info.License.Url
+				}
+			}
+			if spb.Info.Extensions != nil {
+				exts, err := processExtensions(spb.Info.Extensions)
+				if err != nil {
+					return nil, err
+				}
+				s.Info.extensions = exts
+			}
+		}
+		if spb.Host != "" {
+			s.Host = spb.Host
+		}
+		if spb.BasePath != "" {
+			s.BasePath = spb.BasePath
+		}
+		if len(spb.Schemes) > 0 {
+			s.Schemes = make([]string, len(spb.Schemes))
+			for i, scheme := range spb.Schemes {
+				s.Schemes[i] = strings.ToLower(scheme.String())
+			}
+		}
+		if len(spb.Consumes) > 0 {
+			s.Consumes = make([]string, len(spb.Consumes))
+			copy(s.Consumes, spb.Consumes)
+		}
+		if len(spb.Produces) > 0 {
+			s.Produces = make([]string, len(spb.Produces))
+			copy(s.Produces, spb.Produces)
+		}
+		if spb.SecurityDefinitions != nil && spb.SecurityDefinitions.Security != nil {
+			if s.SecurityDefinitions == nil {
+				s.SecurityDefinitions = swaggerSecurityDefinitionsObject{}
+			}
+			for secDefKey, secDefValue := range spb.SecurityDefinitions.Security {
+				var newSecDefValue swaggerSecuritySchemeObject
+				if oldSecDefValue, ok := s.SecurityDefinitions[secDefKey]; !ok {
+					newSecDefValue = swaggerSecuritySchemeObject{}
+				} else {
+					newSecDefValue = oldSecDefValue
+				}
+				if secDefValue.Type != swagger_options.SecurityScheme_TYPE_INVALID {
+					switch secDefValue.Type {
+					case swagger_options.SecurityScheme_TYPE_BASIC:
+						newSecDefValue.Type = "basic"
+					case swagger_options.SecurityScheme_TYPE_API_KEY:
+						newSecDefValue.Type = "apiKey"
+					case swagger_options.SecurityScheme_TYPE_OAUTH2:
+						newSecDefValue.Type = "oauth2"
+					}
+				}
+				if secDefValue.Description != "" {
+					newSecDefValue.Description = secDefValue.Description
+				}
+				if secDefValue.Name != "" {
+					newSecDefValue.Name = secDefValue.Name
+				}
+				if secDefValue.In != swagger_options.SecurityScheme_IN_INVALID {
+					switch secDefValue.In {
+					case swagger_options.SecurityScheme_IN_QUERY:
+						newSecDefValue.In = "query"
+					case swagger_options.SecurityScheme_IN_HEADER:
+						newSecDefValue.In = "header"
+					}
+				}
+				if secDefValue.Flow != swagger_options.SecurityScheme_FLOW_INVALID {
+					switch secDefValue.Flow {
+					case swagger_options.SecurityScheme_FLOW_IMPLICIT:
+						newSecDefValue.Flow = "implicit"
+					case swagger_options.SecurityScheme_FLOW_PASSWORD:
+						newSecDefValue.Flow = "password"
+					case swagger_options.SecurityScheme_FLOW_APPLICATION:
+						newSecDefValue.Flow = "application"
+					case swagger_options.SecurityScheme_FLOW_ACCESS_CODE:
+						newSecDefValue.Flow = "accessCode"
+					}
+				}
+				if secDefValue.AuthorizationUrl != "" {
+					newSecDefValue.AuthorizationURL = secDefValue.AuthorizationUrl
+				}
+				if secDefValue.TokenUrl != "" {
+					newSecDefValue.TokenURL = secDefValue.TokenUrl
+				}
+				if secDefValue.Scopes != nil {
+					if newSecDefValue.Scopes == nil {
+						newSecDefValue.Scopes = swaggerScopesObject{}
+					}
+					for scopeKey, scopeDesc := range secDefValue.Scopes.Scope {
+						newSecDefValue.Scopes[scopeKey] = scopeDesc
+					}
+				}
+				if secDefValue.Extensions != nil {
+					exts, err := processExtensions(secDefValue.Extensions)
+					if err != nil {
+						return nil, err
+					}
+					newSecDefValue.extensions = exts
+				}
+				s.SecurityDefinitions[secDefKey] = newSecDefValue
+			}
+		}
+		if spb.Security != nil {
+			newSecurity := []swaggerSecurityRequirementObject{}
+			if s.Security != nil {
+				newSecurity = s.Security
+			}
+			for _, secReq := range spb.Security {
+				newSecReq := swaggerSecurityRequirementObject{}
+				for secReqKey, secReqValue := range secReq.SecurityRequirement {
+					newSecReqValue := make([]string, len(secReqValue.Scope))
+					copy(newSecReqValue, secReqValue.Scope)
+					newSecReq[secReqKey] = newSecReqValue
+				}
+				newSecurity = append(newSecurity, newSecReq)
+			}
+			s.Security = newSecurity
+		}
+		s.ExternalDocs = protoExternalDocumentationToSwaggerExternalDocumentation(spb.ExternalDocs)
+		// Populate every operation in Paths with the top-level Responses,
+		// without overwriting Responses already set on an operation.
+		if spb.Responses != nil {
+			for _, verbs := range s.Paths {
+				var maps []swaggerResponsesObject
+				if verbs.Delete != nil {
+					maps = append(maps, verbs.Delete.Responses)
+				}
+				if verbs.Get != nil {
+					maps = append(maps, verbs.Get.Responses)
+				}
+				if verbs.Post != nil {
+					maps = append(maps, verbs.Post.Responses)
+				}
+				if verbs.Put != nil {
+					maps = append(maps, verbs.Put.Responses)
+				}
+				if verbs.Patch != nil {
+					maps = append(maps, verbs.Patch.Responses)
+				}
+
+				for k, v := range spb.Responses {
+					for _, respMap := range maps {
+						if _, ok := respMap[k]; ok {
+							// Don't overwrite already existing Responses
+							continue
+						}
+						respMap[k] = swaggerResponseObject{
+							Description: v.Description,
+							Schema:      swaggerSchemaFromProtoSchema(v.Schema, p.reg, customRefs),
+						}
+					}
+				}
+			}
+		}
+
+		if spb.Extensions != nil {
+			exts, err := processExtensions(spb.Extensions)
+			if err != nil {
+				return nil, err
+			}
+			s.extensions = exts
+		}
+
+		// Additional fields on the OpenAPI v2 spec's "Swagger" object
+		// should be added here, once supported in the proto.
+	}
+
+	// Finally add any references added by users that aren't
+	// otherwise rendered.
+	addCustomRefs(s.Definitions, p.reg, customRefs)
+
+	return &s, nil
+}
+
+func processExtensions(inputExts map[string]*structpb.Value) ([]extension, error) {
+	exts := []extension{}
+	for k, v := range inputExts {
+		if !strings.HasPrefix(k, "x-") {
+			return nil, fmt.Errorf("Extension keys need to start with \"x-\": %q", k)
+		}
+		ext, err := (&jsonpb.Marshaler{Indent: "  "}).MarshalToString(v)
+		if err != nil {
+			return nil, err
+		}
+		exts = append(exts, extension{key: k, value: json.RawMessage(ext)})
+	}
+	sort.Slice(exts, func(i, j int) bool { return exts[i].key < exts[j].key })
+	return exts, nil
+}
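+
+// exampleProcessExtensions is an illustrative sketch and is not part of the
+// upstream grpc-gateway source; the extension keys and values are made up.
+// It shows that every key must carry the "x-" prefix and that the result is
+// sorted by key, so the generated JSON is deterministic.
+func exampleProcessExtensions() ([]extension, error) {
+	in := map[string]*structpb.Value{
+		"x-version": {Kind: &structpb.Value_StringValue{StringValue: "v1"}},
+		"x-company": {Kind: &structpb.Value_StringValue{StringValue: "Acme"}},
+	}
+	// "x-company" precedes "x-version" in the returned slice regardless of
+	// map iteration order; a key without the "x-" prefix would be an error.
+	return processExtensions(in)
+}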
+
+// updateSwaggerDataFromComments updates a Swagger object based on a comment
+// from the proto file.
+//
+// First paragraph of a comment is used for summary. Remaining paragraphs of
+// a comment are used for description. If 'Summary' field is not present on
+// the passed swaggerObject, the summary and description are joined by \n\n.
+//
+// If there is a field named 'Info', its 'Summary' and 'Description' fields
+// will be updated instead.
+//
+// If there is no 'Summary', the same behavior will be attempted on 'Title',
+// but only if the last character is not a period.
+func updateSwaggerDataFromComments(swaggerObject interface{}, comment string, isPackageObject bool) error {
+	if len(comment) == 0 {
+		return nil
+	}
+
+	// Figure out what to apply changes to.
+	swaggerObjectValue := reflect.ValueOf(swaggerObject)
+	infoObjectValue := swaggerObjectValue.Elem().FieldByName("Info")
+	if !infoObjectValue.CanSet() {
+		// No such field? Apply summary and description directly to
+		// passed object.
+		infoObjectValue = swaggerObjectValue.Elem()
+	}
+
+	// Figure out which properties to update.
+	summaryValue := infoObjectValue.FieldByName("Summary")
+	descriptionValue := infoObjectValue.FieldByName("Description")
+	readOnlyValue := infoObjectValue.FieldByName("ReadOnly")
+
+	if readOnlyValue.Kind() == reflect.Bool && readOnlyValue.CanSet() && strings.Contains(comment, "Output only.") {
+		readOnlyValue.Set(reflect.ValueOf(true))
+	}
+
+	usingTitle := false
+	if !summaryValue.CanSet() {
+		summaryValue = infoObjectValue.FieldByName("Title")
+		usingTitle = true
+	}
+
+	paragraphs := strings.Split(comment, "\n\n")
+
+	// If there is a summary (or summary-equivalent) and it's empty, use the first
+	// paragraph as summary, and the rest as description.
+	if summaryValue.CanSet() {
+		summary := strings.TrimSpace(paragraphs[0])
+		description := strings.TrimSpace(strings.Join(paragraphs[1:], "\n\n"))
+		if !usingTitle || (len(summary) > 0 && summary[len(summary)-1] != '.') {
+			// overrides the schema value only if it's empty
+			// keep the comment precedence when updating the package definition
+			if summaryValue.Len() == 0 || isPackageObject {
+				summaryValue.Set(reflect.ValueOf(summary))
+			}
+			if len(description) > 0 {
+				if !descriptionValue.CanSet() {
+					return fmt.Errorf("Encountered object type with a summary, but no description")
+				}
+				// overrides the schema value only if it's empty
+				// keep the comment precedence when updating the package definition
+				if descriptionValue.Len() == 0 || isPackageObject {
+					descriptionValue.Set(reflect.ValueOf(description))
+				}
+			}
+			return nil
+		}
+	}
+
+	// There was no summary field on the swaggerObject. Try to apply the
+	// whole comment into description if the swagger object description is empty.
+	if descriptionValue.CanSet() {
+		if descriptionValue.Len() == 0 || isPackageObject {
+			descriptionValue.Set(reflect.ValueOf(strings.Join(paragraphs, "\n\n")))
+		}
+		return nil
+	}
+
+	return fmt.Errorf("no description nor summary property")
+}
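+
+// exampleUpdateOperationFromComment is an illustrative sketch and is not part
+// of the upstream grpc-gateway source; the comment text is made up. It shows
+// how a two-paragraph proto comment is split: the first paragraph becomes the
+// operation summary and the remaining paragraphs become its description.
+func exampleUpdateOperationFromComment() (*swaggerOperationObject, error) {
+	op := &swaggerOperationObject{}
+	comment := "Creates a widget.\n\nThe widget is stored and returned with a server-assigned ID."
+	// swaggerOperationObject has no Info field, so Summary and Description
+	// are populated directly on the object itself.
+	err := updateSwaggerDataFromComments(op, comment, false)
+	return op, err
+}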
+
+func fieldProtoComments(reg *descriptor.Registry, msg *descriptor.Message, field *descriptor.Field) string {
+	protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field")
+	for i, f := range msg.Fields {
+		if f == field {
+			return protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index), protoPath, int32(i))
+		}
+	}
+	return ""
+}
+
+func enumValueProtoComments(reg *descriptor.Registry, enum *descriptor.Enum) string {
+	protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.EnumDescriptorProto)(nil)), "Value")
+	var comments []string
+	for idx, value := range enum.GetValue() {
+		name := value.GetName()
+		str := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index), protoPath, int32(idx))
+		if str != "" {
+			comments = append(comments, name+": "+str)
+		}
+	}
+	if len(comments) > 0 {
+		return "- " + strings.Join(comments, "\n - ")
+	}
+	return ""
+}
+
+func protoComments(reg *descriptor.Registry, file *descriptor.File, outers []string, typeName string, typeIndex int32, fieldPaths ...int32) string {
+	if file.SourceCodeInfo == nil {
+		fmt.Fprintln(os.Stderr, "descriptor.File should not contain nil SourceCodeInfo")
+		return ""
+	}
+
+	outerPaths := make([]int32, len(outers))
+	for i := range outers {
+		location := ""
+		if file.Package != nil {
+			location = file.GetPackage()
+		}
+
+		msg, err := reg.LookupMsg(location, strings.Join(outers[:i+1], "."))
+		if err != nil {
+			panic(err)
+		}
+		outerPaths[i] = int32(msg.Index)
+	}
+
+	for _, loc := range file.SourceCodeInfo.Location {
+		if !isProtoPathMatches(loc.Path, outerPaths, typeName, typeIndex, fieldPaths) {
+			continue
+		}
+		comments := ""
+		if loc.LeadingComments != nil {
+			comments = strings.TrimRight(*loc.LeadingComments, "\n")
+			comments = strings.TrimSpace(comments)
+			// TODO(ivucica): this is a hack to fix "// " being interpreted as "//".
+			// perhaps we should:
+			// - split by \n
+			// - determine if every (but first and last) line begins with " "
+			// - trim every line only if that is the case
+			// - join by \n
+			comments = strings.Replace(comments, "\n ", "\n", -1)
+		}
+		return comments
+	}
+	return ""
+}
+
+var messageProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "MessageType")
+var nestedProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "NestedType")
+var packageProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Package")
+var serviceProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Service")
+var methodProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.ServiceDescriptorProto)(nil)), "Method")
+
+func isProtoPathMatches(paths []int32, outerPaths []int32, typeName string, typeIndex int32, fieldPaths []int32) bool {
+	if typeName == "Package" && typeIndex == packageProtoPath {
+		// path for package comments is just [2], and all the other processing
+		// is too complex for it.
+		if len(paths) == 0 || typeIndex != paths[0] {
+			return false
+		}
+		return true
+	}
+
+	if len(paths) != len(outerPaths)*2+2+len(fieldPaths) {
+		return false
+	}
+
+	if typeName == "Method" {
+		if paths[0] != serviceProtoPath || paths[2] != methodProtoPath {
+			return false
+		}
+		paths = paths[2:]
+	} else {
+		typeNameDescriptor := reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil))
+
+		if len(outerPaths) > 0 {
+			if paths[0] != messageProtoPath || paths[1] != outerPaths[0] {
+				return false
+			}
+			paths = paths[2:]
+			outerPaths = outerPaths[1:]
+
+			for i, v := range outerPaths {
+				if paths[i*2] != nestedProtoPath || paths[i*2+1] != v {
+					return false
+				}
+			}
+			paths = paths[len(outerPaths)*2:]
+
+			if typeName == "MessageType" {
+				typeName = "NestedType"
+			}
+			typeNameDescriptor = reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil))
+		}
+
+		if paths[0] != protoPathIndex(typeNameDescriptor, typeName) || paths[1] != typeIndex {
+			return false
+		}
+		paths = paths[2:]
+	}
+
+	for i, v := range fieldPaths {
+		if paths[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+// protoPathIndex returns a path component for google.protobuf.SourceCodeInfo.Location.
+//
+// Specifically, it returns an id as generated from descriptor proto which
+// can be used to determine what type the id following it in the path is.
+// For example, if we are trying to locate comments related to a field named
+// `Address` in a message named `Person`, the path will be:
+//
+//     [4, a, 2, b]
+//
+// While `a` gets determined by the order in which the messages appear in
+// the proto file, and `b` is the field index specified in the proto
+// file itself, the path actually needs to specify that `a` refers to a
+// message and not, say, a service; and that `b` refers to a field and not
+// an option.
+//
+// protoPathIndex figures out the values 4 and 2 in the above example. Because
+// messages are top level objects, the value of 4 comes from field id for
+// `MessageType` inside `google.protobuf.descriptor.FileDescriptor` message.
+// This field has a message type `google.protobuf.descriptor.DescriptorProto`.
+// And inside message `DescriptorProto`, there is a field named `Field` with id
+// 2.
+//
+// Some code generators seem to be hardcoding these values; this method instead
+// interprets them from `descriptor.proto`-derived Go source as necessary.
+func protoPathIndex(descriptorType reflect.Type, what string) int32 {
+	field, ok := descriptorType.Elem().FieldByName(what)
+	if !ok {
+		panic(fmt.Errorf("could not find protobuf descriptor type id for %s", what))
+	}
+	pbtag := field.Tag.Get("protobuf")
+	if pbtag == "" {
+		panic(fmt.Errorf("no Go tag 'protobuf' on protobuf descriptor for %s", what))
+	}
+	path, err := strconv.Atoi(strings.Split(pbtag, ",")[1])
+	if err != nil {
+		panic(fmt.Errorf("protobuf descriptor id for %s cannot be converted to a number: %s", what, err.Error()))
+	}
+
+	return int32(path)
+}
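+
+// examplePathForMessageField is an illustrative sketch and is not part of the
+// upstream grpc-gateway source. It builds the [4, a, 2, b] path described
+// above, deriving the 4 (FileDescriptorProto.MessageType) and 2
+// (DescriptorProto.Field) components from the descriptor types instead of
+// hardcoding them.
+func examplePathForMessageField(messageIndex, fieldIndex int32) []int32 {
+	messagePath := protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "MessageType")
+	fieldPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field")
+	return []int32{messagePath, messageIndex, fieldPath, fieldIndex}
+}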
+
+// extractOperationOptionFromMethodDescriptor extracts the message of type
+// swagger_options.Operation from a given proto method's descriptor.
+func extractOperationOptionFromMethodDescriptor(meth *pbdescriptor.MethodDescriptorProto) (*swagger_options.Operation, error) {
+	if meth.Options == nil {
+		return nil, nil
+	}
+	if !proto.HasExtension(meth.Options, swagger_options.E_Openapiv2Operation) {
+		return nil, nil
+	}
+	ext, err := proto.GetExtension(meth.Options, swagger_options.E_Openapiv2Operation)
+	if err != nil {
+		return nil, err
+	}
+	opts, ok := ext.(*swagger_options.Operation)
+	if !ok {
+		return nil, fmt.Errorf("extension is %T; want an Operation", ext)
+	}
+	return opts, nil
+}
+
+// extractSchemaOptionFromMessageDescriptor extracts the message of type
+// swagger_options.Schema from a given proto message's descriptor.
+func extractSchemaOptionFromMessageDescriptor(msg *pbdescriptor.DescriptorProto) (*swagger_options.Schema, error) {
+	if msg.Options == nil {
+		return nil, nil
+	}
+	if !proto.HasExtension(msg.Options, swagger_options.E_Openapiv2Schema) {
+		return nil, nil
+	}
+	ext, err := proto.GetExtension(msg.Options, swagger_options.E_Openapiv2Schema)
+	if err != nil {
+		return nil, err
+	}
+	opts, ok := ext.(*swagger_options.Schema)
+	if !ok {
+		return nil, fmt.Errorf("extension is %T; want a Schema", ext)
+	}
+	return opts, nil
+}
+
+// extractSwaggerOptionFromFileDescriptor extracts the message of type
+// swagger_options.Swagger from a given proto method's descriptor.
+func extractSwaggerOptionFromFileDescriptor(file *pbdescriptor.FileDescriptorProto) (*swagger_options.Swagger, error) {
+	if file.Options == nil {
+		return nil, nil
+	}
+	if !proto.HasExtension(file.Options, swagger_options.E_Openapiv2Swagger) {
+		return nil, nil
+	}
+	ext, err := proto.GetExtension(file.Options, swagger_options.E_Openapiv2Swagger)
+	if err != nil {
+		return nil, err
+	}
+	opts, ok := ext.(*swagger_options.Swagger)
+	if !ok {
+		return nil, fmt.Errorf("extension is %T; want a Swagger object", ext)
+	}
+	return opts, nil
+}
+
+func extractJSONSchemaFromFieldDescriptor(fd *pbdescriptor.FieldDescriptorProto) (*swagger_options.JSONSchema, error) {
+	if fd.Options == nil {
+		return nil, nil
+	}
+	if !proto.HasExtension(fd.Options, swagger_options.E_Openapiv2Field) {
+		return nil, nil
+	}
+	ext, err := proto.GetExtension(fd.Options, swagger_options.E_Openapiv2Field)
+	if err != nil {
+		return nil, err
+	}
+	opts, ok := ext.(*swagger_options.JSONSchema)
+	if !ok {
+		return nil, fmt.Errorf("extension is %T; want a JSONSchema object", ext)
+	}
+	return opts, nil
+}
+
+func protoJSONSchemaToSwaggerSchemaCore(j *swagger_options.JSONSchema, reg *descriptor.Registry, refs refMap) schemaCore {
+	ret := schemaCore{}
+
+	if j.GetRef() != "" {
+		swaggerName := fullyQualifiedNameToSwaggerName(j.GetRef(), reg)
+		if swaggerName != "" {
+			ret.Ref = "#/definitions/" + swaggerName
+			if refs != nil {
+				refs[j.GetRef()] = struct{}{}
+			}
+		} else {
+			ret.Ref += j.GetRef()
+		}
+	} else {
+		f, t := protoJSONSchemaTypeToFormat(j.GetType())
+		ret.Format = f
+		ret.Type = t
+	}
+
+	return ret
+}
+
+func updateSwaggerObjectFromJSONSchema(s *swaggerSchemaObject, j *swagger_options.JSONSchema) {
+	s.Title = j.GetTitle()
+	s.Description = j.GetDescription()
+	s.ReadOnly = j.GetReadOnly()
+	s.MultipleOf = j.GetMultipleOf()
+	s.Maximum = j.GetMaximum()
+	s.ExclusiveMaximum = j.GetExclusiveMaximum()
+	s.Minimum = j.GetMinimum()
+	s.ExclusiveMinimum = j.GetExclusiveMinimum()
+	s.MaxLength = j.GetMaxLength()
+	s.MinLength = j.GetMinLength()
+	s.Pattern = j.GetPattern()
+	s.Default = j.GetDefault()
+	s.MaxItems = j.GetMaxItems()
+	s.MinItems = j.GetMinItems()
+	s.UniqueItems = j.GetUniqueItems()
+	s.MaxProperties = j.GetMaxProperties()
+	s.MinProperties = j.GetMinProperties()
+	s.Required = j.GetRequired()
+	if overrideType := j.GetType(); len(overrideType) > 0 {
+		s.Type = strings.ToLower(overrideType[0].String())
+	}
+}
+
+func swaggerSchemaFromProtoSchema(s *swagger_options.Schema, reg *descriptor.Registry, refs refMap) swaggerSchemaObject {
+	ret := swaggerSchemaObject{
+		ExternalDocs: protoExternalDocumentationToSwaggerExternalDocumentation(s.GetExternalDocs()),
+	}
+
+	ret.schemaCore = protoJSONSchemaToSwaggerSchemaCore(s.GetJsonSchema(), reg, refs)
+	updateSwaggerObjectFromJSONSchema(&ret, s.GetJsonSchema())
+
+	if s != nil && s.Example != nil {
+		ret.Example = json.RawMessage(s.Example.Value)
+	}
+
+	return ret
+}
+
+func protoJSONSchemaTypeToFormat(in []swagger_options.JSONSchema_JSONSchemaSimpleTypes) (string, string) {
+	if len(in) == 0 {
+		return "", ""
+	}
+
+	// Can't support more than 1 type, just return the first element.
+	// This is due to an inconsistency in the design of the openapiv2 proto
+	// and that used in schemaCore. schemaCore uses the v3 definition of types,
+	// which only allows a single string, while the openapiv2 proto uses the OpenAPI v2
+	// definition, which defers to the JSON schema definition, which allows a string or an array.
+	// Sources:
+	// https://swagger.io/specification/#itemsObject
+	// https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.2
+	switch in[0] {
+	case swagger_options.JSONSchema_UNKNOWN, swagger_options.JSONSchema_NULL:
+		return "", ""
+	case swagger_options.JSONSchema_OBJECT:
+		return "object", ""
+	case swagger_options.JSONSchema_ARRAY:
+		return "array", ""
+	case swagger_options.JSONSchema_BOOLEAN:
+		return "boolean", "boolean"
+	case swagger_options.JSONSchema_INTEGER:
+		return "integer", "int32"
+	case swagger_options.JSONSchema_NUMBER:
+		return "number", "double"
+	case swagger_options.JSONSchema_STRING:
+		// NOTE: in the Swagger specification, format should be empty for the string type
+		return "string", ""
+	default:
+		// Maybe panic?
+		return "", ""
+	}
+}
+
+func protoExternalDocumentationToSwaggerExternalDocumentation(in *swagger_options.ExternalDocumentation) *swaggerExternalDocumentationObject {
+	if in == nil {
+		return nil
+	}
+
+	return &swaggerExternalDocumentationObject{
+		Description: in.Description,
+		URL:         in.Url,
+	}
+}
+
+func addCustomRefs(d swaggerDefinitionsObject, reg *descriptor.Registry, refs refMap) {
+	if len(refs) == 0 {
+		return
+	}
+	msgMap := make(messageMap)
+	enumMap := make(enumMap)
+	for ref := range refs {
+		if _, ok := d[fullyQualifiedNameToSwaggerName(ref, reg)]; ok {
+			// Skip already existing definitions
+			delete(refs, ref)
+			continue
+		}
+		msg, err := reg.LookupMsg("", ref)
+		if err == nil {
+			msgMap[fullyQualifiedNameToSwaggerName(ref, reg)] = msg
+			continue
+		}
+		enum, err := reg.LookupEnum("", ref)
+		if err == nil {
+			enumMap[fullyQualifiedNameToSwaggerName(ref, reg)] = enum
+			continue
+		}
+
+		// Unexpected: the reference should resolve to either a message or an enum.
+	}
+	renderMessagesAsDefinition(msgMap, d, reg, refs)
+	renderEnumerationsAsDefinition(enumMap, d, reg)
+
+	// Run again in case any new refs were added
+	addCustomRefs(d, reg, refs)
+}
+
+func lowerCamelCase(parameter string) string {
+	parameterString := gogen.CamelCase(parameter)
+	builder := &strings.Builder{}
+	builder.WriteString(strings.ToLower(string(parameterString[0])))
+	builder.WriteString(parameterString[1:])
+	return builder.String()
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go
new file mode 100644
index 0000000..77db96d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go
@@ -0,0 +1,254 @@
+package genswagger
+
+import (
+	"bytes"
+	"encoding/json"
+
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+)
+
+type param struct {
+	*descriptor.File
+	reg *descriptor.Registry
+}
+
+type binding struct {
+	*descriptor.Binding
+}
+
+// http://swagger.io/specification/#infoObject
+type swaggerInfoObject struct {
+	Title          string `json:"title"`
+	Description    string `json:"description,omitempty"`
+	TermsOfService string `json:"termsOfService,omitempty"`
+	Version        string `json:"version"`
+
+	Contact *swaggerContactObject `json:"contact,omitempty"`
+	License *swaggerLicenseObject `json:"license,omitempty"`
+
+	extensions []extension
+}
+
+// http://swagger.io/specification/#contactObject
+type swaggerContactObject struct {
+	Name  string `json:"name,omitempty"`
+	URL   string `json:"url,omitempty"`
+	Email string `json:"email,omitempty"`
+}
+
+// http://swagger.io/specification/#licenseObject
+type swaggerLicenseObject struct {
+	Name string `json:"name,omitempty"`
+	URL  string `json:"url,omitempty"`
+}
+
+// http://swagger.io/specification/#externalDocumentationObject
+type swaggerExternalDocumentationObject struct {
+	Description string `json:"description,omitempty"`
+	URL         string `json:"url,omitempty"`
+}
+
+type extension struct {
+	key   string
+	value json.RawMessage
+}
+
+// http://swagger.io/specification/#swaggerObject
+type swaggerObject struct {
+	Swagger             string                              `json:"swagger"`
+	Info                swaggerInfoObject                   `json:"info"`
+	Host                string                              `json:"host,omitempty"`
+	BasePath            string                              `json:"basePath,omitempty"`
+	Schemes             []string                            `json:"schemes"`
+	Consumes            []string                            `json:"consumes"`
+	Produces            []string                            `json:"produces"`
+	Paths               swaggerPathsObject                  `json:"paths"`
+	Definitions         swaggerDefinitionsObject            `json:"definitions"`
+	StreamDefinitions   swaggerDefinitionsObject            `json:"x-stream-definitions,omitempty"`
+	SecurityDefinitions swaggerSecurityDefinitionsObject    `json:"securityDefinitions,omitempty"`
+	Security            []swaggerSecurityRequirementObject  `json:"security,omitempty"`
+	ExternalDocs        *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"`
+
+	extensions []extension
+}
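+
+// exampleMinimalSwaggerObject is an illustrative sketch and is not part of the
+// upstream grpc-gateway source; the title and version values are made up. It
+// shows which fields always appear in the marshalled spec because they have no
+// omitempty tag (swagger, info, schemes, consumes, produces, paths and
+// definitions). Note that plain encoding/json skips the unexported extensions
+// field.
+func exampleMinimalSwaggerObject() ([]byte, error) {
+	s := swaggerObject{
+		Swagger:     "2.0",
+		Info:        swaggerInfoObject{Title: "Example API", Version: "1.0"},
+		Paths:       swaggerPathsObject{},
+		Definitions: swaggerDefinitionsObject{},
+	}
+	return json.Marshal(s)
+}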
+
+// http://swagger.io/specification/#securityDefinitionsObject
+type swaggerSecurityDefinitionsObject map[string]swaggerSecuritySchemeObject
+
+// http://swagger.io/specification/#securitySchemeObject
+type swaggerSecuritySchemeObject struct {
+	Type             string              `json:"type"`
+	Description      string              `json:"description,omitempty"`
+	Name             string              `json:"name,omitempty"`
+	In               string              `json:"in,omitempty"`
+	Flow             string              `json:"flow,omitempty"`
+	AuthorizationURL string              `json:"authorizationUrl,omitempty"`
+	TokenURL         string              `json:"tokenUrl,omitempty"`
+	Scopes           swaggerScopesObject `json:"scopes,omitempty"`
+
+	extensions []extension
+}
+
+// http://swagger.io/specification/#scopesObject
+type swaggerScopesObject map[string]string
+
+// http://swagger.io/specification/#securityRequirementObject
+type swaggerSecurityRequirementObject map[string][]string
+
+// http://swagger.io/specification/#pathsObject
+type swaggerPathsObject map[string]swaggerPathItemObject
+
+// http://swagger.io/specification/#pathItemObject
+type swaggerPathItemObject struct {
+	Get    *swaggerOperationObject `json:"get,omitempty"`
+	Delete *swaggerOperationObject `json:"delete,omitempty"`
+	Post   *swaggerOperationObject `json:"post,omitempty"`
+	Put    *swaggerOperationObject `json:"put,omitempty"`
+	Patch  *swaggerOperationObject `json:"patch,omitempty"`
+}
+
+// http://swagger.io/specification/#operationObject
+type swaggerOperationObject struct {
+	Summary     string                  `json:"summary,omitempty"`
+	Description string                  `json:"description,omitempty"`
+	OperationID string                  `json:"operationId"`
+	Responses   swaggerResponsesObject  `json:"responses"`
+	Parameters  swaggerParametersObject `json:"parameters,omitempty"`
+	Tags        []string                `json:"tags,omitempty"`
+	Deprecated  bool                    `json:"deprecated,omitempty"`
+
+	Security     *[]swaggerSecurityRequirementObject `json:"security,omitempty"`
+	ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"`
+
+	extensions []extension
+}
+
+type swaggerParametersObject []swaggerParameterObject
+
+// http://swagger.io/specification/#parameterObject
+type swaggerParameterObject struct {
+	Name             string              `json:"name"`
+	Description      string              `json:"description,omitempty"`
+	In               string              `json:"in,omitempty"`
+	Required         bool                `json:"required"`
+	Type             string              `json:"type,omitempty"`
+	Format           string              `json:"format,omitempty"`
+	Items            *swaggerItemsObject `json:"items,omitempty"`
+	Enum             []string            `json:"enum,omitempty"`
+	CollectionFormat string              `json:"collectionFormat,omitempty"`
+	Default          string              `json:"default,omitempty"`
+	MinItems         *int                `json:"minItems,omitempty"`
+
+	// Or you can explicitly refer to another type. If this is defined all
+	// other fields should be empty
+	Schema *swaggerSchemaObject `json:"schema,omitempty"`
+}
+
+// core part of schema, which is common to itemsObject and schemaObject.
+// http://swagger.io/specification/#itemsObject
+type schemaCore struct {
+	Type    string          `json:"type,omitempty"`
+	Format  string          `json:"format,omitempty"`
+	Ref     string          `json:"$ref,omitempty"`
+	Example json.RawMessage `json:"example,omitempty"`
+
+	Items *swaggerItemsObject `json:"items,omitempty"`
+
+	// If the item is an enumeration, include a list of all the *NAMES* of the
+	// enum values. This assumes every enum starts at index 0, which is not a
+	// safe assumption in general.
+	Enum    []string `json:"enum,omitempty"`
+	Default string   `json:"default,omitempty"`
+}
+
+type swaggerItemsObject schemaCore
+
+// http://swagger.io/specification/#responsesObject
+type swaggerResponsesObject map[string]swaggerResponseObject
+
+// http://swagger.io/specification/#responseObject
+type swaggerResponseObject struct {
+	Description string              `json:"description"`
+	Schema      swaggerSchemaObject `json:"schema"`
+
+	extensions []extension
+}
+
+type keyVal struct {
+	Key   string
+	Value interface{}
+}
+
+type swaggerSchemaObjectProperties []keyVal
+
+func (op swaggerSchemaObjectProperties) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.WriteString("{")
+	for i, kv := range op {
+		if i != 0 {
+			buf.WriteString(",")
+		}
+		key, err := json.Marshal(kv.Key)
+		if err != nil {
+			return nil, err
+		}
+		buf.Write(key)
+		buf.WriteString(":")
+		val, err := json.Marshal(kv.Value)
+		if err != nil {
+			return nil, err
+		}
+		buf.Write(val)
+	}
+
+	buf.WriteString("}")
+	return buf.Bytes(), nil
+}
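+
+// exampleOrderedProperties is an illustrative sketch and is not part of the
+// upstream grpc-gateway source; the property names are made up. Unlike a map,
+// swaggerSchemaObjectProperties preserves declaration order when marshalled,
+// so this yields {"id":{"type":"string"},"name":{"type":"string"}} with "id"
+// always first.
+func exampleOrderedProperties() ([]byte, error) {
+	props := swaggerSchemaObjectProperties{
+		{Key: "id", Value: schemaCore{Type: "string"}},
+		{Key: "name", Value: schemaCore{Type: "string"}},
+	}
+	return json.Marshal(props)
+}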
+
+// http://swagger.io/specification/#schemaObject
+type swaggerSchemaObject struct {
+	schemaCore
+	// Properties can be recursively defined
+	Properties           *swaggerSchemaObjectProperties `json:"properties,omitempty"`
+	AdditionalProperties *swaggerSchemaObject           `json:"additionalProperties,omitempty"`
+
+	Description string `json:"description,omitempty"`
+	Title       string `json:"title,omitempty"`
+
+	ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"`
+
+	ReadOnly         bool     `json:"readOnly,omitempty"`
+	MultipleOf       float64  `json:"multipleOf,omitempty"`
+	Maximum          float64  `json:"maximum,omitempty"`
+	ExclusiveMaximum bool     `json:"exclusiveMaximum,omitempty"`
+	Minimum          float64  `json:"minimum,omitempty"`
+	ExclusiveMinimum bool     `json:"exclusiveMinimum,omitempty"`
+	MaxLength        uint64   `json:"maxLength,omitempty"`
+	MinLength        uint64   `json:"minLength,omitempty"`
+	Pattern          string   `json:"pattern,omitempty"`
+	MaxItems         uint64   `json:"maxItems,omitempty"`
+	MinItems         uint64   `json:"minItems,omitempty"`
+	UniqueItems      bool     `json:"uniqueItems,omitempty"`
+	MaxProperties    uint64   `json:"maxProperties,omitempty"`
+	MinProperties    uint64   `json:"minProperties,omitempty"`
+	Required         []string `json:"required,omitempty"`
+}
+
+// http://swagger.io/specification/#referenceObject
+type swaggerReferenceObject struct {
+	Ref string `json:"$ref"`
+}
+
+// http://swagger.io/specification/#definitionsObject
+type swaggerDefinitionsObject map[string]swaggerSchemaObject
+
+// Internal type mapping from FQMN to descriptor.Message. Used as a set by the
+// findServiceMessages function.
+type messageMap map[string]*descriptor.Message
+
+// Internal type mapping from FQEN to descriptor.Enum. Used as a set by the
+// findServiceMessages function.
+type enumMap map[string]*descriptor.Enum
+
+// Internal type to store used references.
+type refMap map[string]struct{}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go
new file mode 100644
index 0000000..237e460
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go
@@ -0,0 +1,198 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/golang/protobuf/proto"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+	"github.com/grpc-ecosystem/grpc-gateway/codegenerator"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
+	"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger"
+)
+
+var (
+	importPrefix               = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files")
+	file                       = flag.String("file", "-", "where to load data from")
+	allowDeleteBody            = flag.Bool("allow_delete_body", false, "unless set, HTTP DELETE methods may not have a body")
+	grpcAPIConfiguration       = flag.String("grpc_api_configuration", "", "path to gRPC API Configuration in YAML format")
+	allowMerge                 = flag.Bool("allow_merge", false, "if set, generates one swagger file out of multiple protos")
+	mergeFileName              = flag.String("merge_file_name", "apidocs", "target swagger file name prefix after merge")
+	useJSONNamesForFields      = flag.Bool("json_names_for_fields", false, "if set, Field.GetJsonName() will be used for generating swagger definitions, otherwise Field.GetName() will be used")
+	repeatedPathParamSeparator = flag.String("repeated_path_param_separator", "csv", "configures how repeated fields should be split. Allowed values are `csv`, `pipes`, `ssv` and `tsv`.")
+	versionFlag                = flag.Bool("version", false, "print the current version")
+	allowRepeatedFieldsInBody  = flag.Bool("allow_repeated_fields_in_body", false, "allows to use repeated field in `body` and `response_body` field of `google.api.http` annotation option")
+	includePackageInTags       = flag.Bool("include_package_in_tags", false, "if unset, the gRPC service name is added to the `Tags` field of each operation; if set and a `package` directive is present in the proto file, the package name will be prepended to the service name")
+	useFQNForSwaggerName       = flag.Bool("fqn_for_swagger_name", false, "if set, the object's swagger names will use the fully qualified name from the proto definition (e.g. my.package.MyMessage.MyInnerMessage)")
+)
+
+// Variables set by goreleaser at build time
+var (
+	version = "dev"
+	commit  = "unknown"
+	date    = "unknown"
+)
+
+func main() {
+	flag.Parse()
+	defer glog.Flush()
+
+	if *versionFlag {
+		fmt.Printf("Version %v, commit %v, built at %v\n", version, commit, date)
+		os.Exit(0)
+	}
+
+	reg := descriptor.NewRegistry()
+
+	glog.V(1).Info("Processing code generator request")
+	f := os.Stdin
+	if *file != "-" {
+		var err error
+		f, err = os.Open(*file)
+		if err != nil {
+			glog.Fatal(err)
+		}
+	}
+	glog.V(1).Info("Parsing code generator request")
+	req, err := codegenerator.ParseRequest(f)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	glog.V(1).Info("Parsed code generator request")
+	pkgMap := make(map[string]string)
+	if req.Parameter != nil {
+		err := parseReqParam(req.GetParameter(), flag.CommandLine, pkgMap)
+		if err != nil {
+			glog.Fatalf("Error parsing flags: %v", err)
+		}
+	}
+
+	reg.SetPrefix(*importPrefix)
+	reg.SetAllowDeleteBody(*allowDeleteBody)
+	reg.SetAllowMerge(*allowMerge)
+	reg.SetMergeFileName(*mergeFileName)
+	reg.SetUseJSONNamesForFields(*useJSONNamesForFields)
+	reg.SetAllowRepeatedFieldsInBody(*allowRepeatedFieldsInBody)
+	reg.SetIncludePackageInTags(*includePackageInTags)
+	reg.SetUseFQNForSwaggerName(*useFQNForSwaggerName)
+	if err := reg.SetRepeatedPathParamSeparator(*repeatedPathParamSeparator); err != nil {
+		emitError(err)
+		return
+	}
+	for k, v := range pkgMap {
+		reg.AddPkgMap(k, v)
+	}
+
+	if *grpcAPIConfiguration != "" {
+		if err := reg.LoadGrpcAPIServiceFromYAML(*grpcAPIConfiguration); err != nil {
+			emitError(err)
+			return
+		}
+	}
+
+	g := genswagger.New(reg)
+
+	if err := genswagger.AddStreamError(reg); err != nil {
+		emitError(err)
+		return
+	}
+
+	if err := reg.Load(req); err != nil {
+		emitError(err)
+		return
+	}
+
+	var targets []*descriptor.File
+	for _, target := range req.FileToGenerate {
+		f, err := reg.LookupFile(target)
+		if err != nil {
+			glog.Fatal(err)
+		}
+		targets = append(targets, f)
+	}
+
+	out, err := g.Generate(targets)
+	glog.V(1).Info("Processed code generator request")
+	if err != nil {
+		emitError(err)
+		return
+	}
+	emitFiles(out)
+}
+
+func emitFiles(out []*plugin.CodeGeneratorResponse_File) {
+	emitResp(&plugin.CodeGeneratorResponse{File: out})
+}
+
+func emitError(err error) {
+	emitResp(&plugin.CodeGeneratorResponse{Error: proto.String(err.Error())})
+}
+
+func emitResp(resp *plugin.CodeGeneratorResponse) {
+	buf, err := proto.Marshal(resp)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	if _, err := os.Stdout.Write(buf); err != nil {
+		glog.Fatal(err)
+	}
+}
+
+// parseReqParam parses a CodeGeneratorRequest parameter and adds the
+// extracted values to the given FlagSet and pkgMap. Returns a non-nil
+// error if setting a flag failed.
+func parseReqParam(param string, f *flag.FlagSet, pkgMap map[string]string) error {
+	if param == "" {
+		return nil
+	}
+	for _, p := range strings.Split(param, ",") {
+		spec := strings.SplitN(p, "=", 2)
+		if len(spec) == 1 {
+			if spec[0] == "allow_delete_body" {
+				err := f.Set(spec[0], "true")
+				if err != nil {
+					return fmt.Errorf("Cannot set flag %s: %v", p, err)
+				}
+				continue
+			}
+			if spec[0] == "allow_merge" {
+				err := f.Set(spec[0], "true")
+				if err != nil {
+					return fmt.Errorf("Cannot set flag %s: %v", p, err)
+				}
+				continue
+			}
+			if spec[0] == "allow_repeated_fields_in_body" {
+				err := f.Set(spec[0], "true")
+				if err != nil {
+					return fmt.Errorf("Cannot set flag %s: %v", p, err)
+				}
+				continue
+			}
+			if spec[0] == "include_package_in_tags" {
+				err := f.Set(spec[0], "true")
+				if err != nil {
+					return fmt.Errorf("Cannot set flag %s: %v", p, err)
+				}
+				continue
+			}
+			err := f.Set(spec[0], "")
+			if err != nil {
+				return fmt.Errorf("Cannot set flag %s: %v", p, err)
+			}
+			continue
+		}
+		name, value := spec[0], spec[1]
+		if strings.HasPrefix(name, "M") {
+			pkgMap[name[1:]] = value
+			continue
+		}
+		if err := f.Set(name, value); err != nil {
+			return fmt.Errorf("Cannot set flag %s: %v", p, err)
+		}
+	}
+	return nil
+}
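+
+// exampleParseReqParam is an illustrative sketch and is not part of the
+// upstream grpc-gateway source; the file name and import path are made up.
+// A protoc parameter is a comma-separated list: recognized bare names such as
+// allow_merge enable boolean flags, name=value pairs set flags, and entries
+// prefixed with "M" record a proto-file-to-Go-import-path mapping in pkgMap.
+func exampleParseReqParam() (map[string]string, error) {
+	pkgMap := make(map[string]string)
+	param := "allow_merge,merge_file_name=api,Mfoo/bar.proto=example.com/foo/bar"
+	err := parseReqParam(param, flag.CommandLine, pkgMap)
+	return pkgMap, err
+}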
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/BUILD.bazel
new file mode 100644
index 0000000..8dea43d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/BUILD.bazel
@@ -0,0 +1,38 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "options_proto_files",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+)
+
+go_library(
+    name = "go_default_library",
+    embed = [":options_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options",
+)
+
+proto_library(
+    name = "options_proto",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+    deps = [
+        "@com_google_protobuf//:any_proto",
+        "@com_google_protobuf//:descriptor_proto",
+        "@com_google_protobuf//:struct_proto",
+    ],
+)
+
+go_proto_library(
+    name = "options_go_proto",
+    compilers = ["@io_bazel_rules_go//proto:go_grpc"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options",
+    proto = ":options_proto",
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go
new file mode 100644
index 0000000..9fc282b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go
@@ -0,0 +1,103 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: protoc-gen-swagger/options/annotations.proto
+
+package options // import "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+var E_Openapiv2Swagger = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*Swagger)(nil),
+	Field:         1042,
+	Name:          "grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger",
+	Tag:           "bytes,1042,opt,name=openapiv2_swagger,json=openapiv2Swagger",
+	Filename:      "protoc-gen-swagger/options/annotations.proto",
+}
+
+var E_Openapiv2Operation = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MethodOptions)(nil),
+	ExtensionType: (*Operation)(nil),
+	Field:         1042,
+	Name:          "grpc.gateway.protoc_gen_swagger.options.openapiv2_operation",
+	Tag:           "bytes,1042,opt,name=openapiv2_operation,json=openapiv2Operation",
+	Filename:      "protoc-gen-swagger/options/annotations.proto",
+}
+
+var E_Openapiv2Schema = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*Schema)(nil),
+	Field:         1042,
+	Name:          "grpc.gateway.protoc_gen_swagger.options.openapiv2_schema",
+	Tag:           "bytes,1042,opt,name=openapiv2_schema,json=openapiv2Schema",
+	Filename:      "protoc-gen-swagger/options/annotations.proto",
+}
+
+var E_Openapiv2Tag = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.ServiceOptions)(nil),
+	ExtensionType: (*Tag)(nil),
+	Field:         1042,
+	Name:          "grpc.gateway.protoc_gen_swagger.options.openapiv2_tag",
+	Tag:           "bytes,1042,opt,name=openapiv2_tag,json=openapiv2Tag",
+	Filename:      "protoc-gen-swagger/options/annotations.proto",
+}
+
+var E_Openapiv2Field = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*JSONSchema)(nil),
+	Field:         1042,
+	Name:          "grpc.gateway.protoc_gen_swagger.options.openapiv2_field",
+	Tag:           "bytes,1042,opt,name=openapiv2_field,json=openapiv2Field",
+	Filename:      "protoc-gen-swagger/options/annotations.proto",
+}
+
+func init() {
+	proto.RegisterExtension(E_Openapiv2Swagger)
+	proto.RegisterExtension(E_Openapiv2Operation)
+	proto.RegisterExtension(E_Openapiv2Schema)
+	proto.RegisterExtension(E_Openapiv2Tag)
+	proto.RegisterExtension(E_Openapiv2Field)
+}
+
+func init() {
+	proto.RegisterFile("protoc-gen-swagger/options/annotations.proto", fileDescriptor_annotations_8378bd63c2853a5a)
+}
+
+var fileDescriptor_annotations_8378bd63c2853a5a = []byte{
+	// 346 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4f, 0xea, 0x40,
+	0x14, 0xc5, 0xc3, 0xe6, 0xe5, 0xa5, 0xef, 0xa9, 0x58, 0x37, 0x86, 0xf8, 0x87, 0x9d, 0xc6, 0xc0,
+	0x8c, 0x81, 0x5d, 0x77, 0x6a, 0xe2, 0xc2, 0x44, 0x49, 0x0a, 0x2b, 0x37, 0x64, 0x18, 0x2e, 0x97,
+	0x49, 0x4a, 0xef, 0x64, 0x66, 0x80, 0x90, 0xb0, 0xf4, 0x13, 0xf8, 0x89, 0x8d, 0xd3, 0xd2, 0x9a,
+	0x8a, 0xa6, 0xbb, 0xde, 0xdb, 0x39, 0xe7, 0x77, 0x7a, 0x3a, 0x41, 0x47, 0x1b, 0x72, 0x24, 0xbb,
+	0x08, 0x69, 0xd7, 0xae, 0x05, 0x22, 0x18, 0x4e, 0xda, 0x29, 0x4a, 0x2d, 0x17, 0x69, 0x4a, 0x4e,
+	0xf8, 0x67, 0xe6, 0x8f, 0x85, 0x57, 0x68, 0xb4, 0x64, 0x28, 0x1c, 0xac, 0xc5, 0x26, 0xdb, 0xc9,
+	0x31, 0x42, 0x3a, 0xce, 0xa5, 0x2c, 0x97, 0xb6, 0x6e, 0x7e, 0xb1, 0x25, 0x0d, 0xa9, 0xd0, 0x6a,
+	0xd5, 0xcb, 0x0c, 0x5a, 0x6d, 0x24, 0xc2, 0x04, 0xb8, 0x9f, 0x26, 0xcb, 0x19, 0x9f, 0x82, 0x95,
+	0x46, 0x69, 0x47, 0x26, 0x3b, 0x11, 0x6d, 0x83, 0xe3, 0x42, 0xb4, 0x43, 0x85, 0x67, 0x2c, 0xd3,
+	0xb1, 0x9d, 0x8e, 0x3d, 0xaa, 0x04, 0x06, 0x19, 0xe4, 0xf4, 0xfd, 0x6f, 0xbb, 0x71, 0xfd, 0xaf,
+	0x77, 0xcb, 0x6a, 0x26, 0x66, 0xc3, 0x6c, 0x8e, 0x9b, 0x05, 0x29, 0xdf, 0x44, 0x6f, 0x8d, 0xe0,
+	0xa4, 0xc4, 0x93, 0x06, 0xe3, 0x3b, 0x09, 0x2f, 0xbe, 0x05, 0x78, 0x06, 0x37, 0xa7, 0x69, 0x25,
+	0x42, 0xaf, 0x76, 0x84, 0xc1, 0xce, 0x3a, 0x0e, 0x0b, 0x5e, 0xb1, 0x8b, 0xb6, 0x41, 0xf3, 0x4b,
+	0x09, 0x72, 0x0e, 0x0b, 0x11, 0x5e, 0xee, 0x89, 0x60, 0xad, 0xc0, 0x6a, 0x0d, 0xbc, 0x7e, 0x0d,
+	0xde, 0x38, 0x3e, 0x2a, 0x5b, 0xf0, 0x8b, 0xc8, 0x06, 0x07, 0x25, 0xdd, 0x09, 0xdc, 0x83, 0x1e,
+	0x82, 0x59, 0x29, 0x59, 0x45, 0x77, 0x6a, 0xa3, 0x47, 0x02, 0xe3, 0xff, 0x05, 0x64, 0x24, 0x30,
+	0xda, 0x06, 0x65, 0x8e, 0xf1, 0x4c, 0x41, 0x32, 0x0d, 0xcf, 0xf7, 0xfc, 0x75, 0x48, 0xaa, 0x9d,
+	0xf7, 0x6b, 0x43, 0x9f, 0x86, 0x83, 0x97, 0xfc, 0x9b, 0x0f, 0x0b, 0x96, 0xb7, 0xbc, 0x7f, 0x78,
+	0xbd, 0x43, 0xe5, 0xe6, 0xcb, 0x09, 0x93, 0xb4, 0xe0, 0x9f, 0x86, 0x5d, 0x90, 0x64, 0x37, 0xd6,
+	0x41, 0x3e, 0xe6, 0xfe, 0xfc, 0xe7, 0xcb, 0x3e, 0xf9, 0xe3, 0xdf, 0xf5, 0x3f, 0x02, 0x00, 0x00,
+	0xff, 0xff, 0x4b, 0xc4, 0x41, 0xfb, 0x68, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto
new file mode 100644
index 0000000..2c0f594
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto
@@ -0,0 +1,44 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_swagger.options;
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options";
+
+import "protoc-gen-swagger/options/openapiv2.proto";
+import "google/protobuf/descriptor.proto";
+
+extend google.protobuf.FileOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Swagger openapiv2_swagger = 1042;
+}
+extend google.protobuf.MethodOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Operation openapiv2_operation = 1042;
+}
+extend google.protobuf.MessageOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Schema openapiv2_schema = 1042;
+}
+extend google.protobuf.ServiceOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Tag openapiv2_tag = 1042;
+}
+extend google.protobuf.FieldOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  JSONSchema openapiv2_field = 1042;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go
new file mode 100644
index 0000000..6720071
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go
@@ -0,0 +1,1598 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: protoc-gen-swagger/options/openapiv2.proto
+
+package options // import "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+import _struct "github.com/golang/protobuf/ptypes/struct"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Swagger_SwaggerScheme int32
+
+const (
+	Swagger_UNKNOWN Swagger_SwaggerScheme = 0
+	Swagger_HTTP    Swagger_SwaggerScheme = 1
+	Swagger_HTTPS   Swagger_SwaggerScheme = 2
+	Swagger_WS      Swagger_SwaggerScheme = 3
+	Swagger_WSS     Swagger_SwaggerScheme = 4
+)
+
+var Swagger_SwaggerScheme_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "HTTP",
+	2: "HTTPS",
+	3: "WS",
+	4: "WSS",
+}
+var Swagger_SwaggerScheme_value = map[string]int32{
+	"UNKNOWN": 0,
+	"HTTP":    1,
+	"HTTPS":   2,
+	"WS":      3,
+	"WSS":     4,
+}
+
+func (x Swagger_SwaggerScheme) String() string {
+	return proto.EnumName(Swagger_SwaggerScheme_name, int32(x))
+}
+func (Swagger_SwaggerScheme) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{0, 0}
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+	JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+	JSONSchema_ARRAY   JSONSchema_JSONSchemaSimpleTypes = 1
+	JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+	JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+	JSONSchema_NULL    JSONSchema_JSONSchemaSimpleTypes = 4
+	JSONSchema_NUMBER  JSONSchema_JSONSchemaSimpleTypes = 5
+	JSONSchema_OBJECT  JSONSchema_JSONSchemaSimpleTypes = 6
+	JSONSchema_STRING  JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+var JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "ARRAY",
+	2: "BOOLEAN",
+	3: "INTEGER",
+	4: "NULL",
+	5: "NUMBER",
+	6: "OBJECT",
+	7: "STRING",
+}
+var JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+	"UNKNOWN": 0,
+	"ARRAY":   1,
+	"BOOLEAN": 2,
+	"INTEGER": 3,
+	"NULL":    4,
+	"NUMBER":  5,
+	"OBJECT":  6,
+	"STRING":  7,
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+	return proto.EnumName(JSONSchema_JSONSchemaSimpleTypes_name, int32(x))
+}
+func (JSONSchema_JSONSchemaSimpleTypes) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{8, 0}
+}
+
+// Required. The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+	SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+	SecurityScheme_TYPE_BASIC   SecurityScheme_Type = 1
+	SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+	SecurityScheme_TYPE_OAUTH2  SecurityScheme_Type = 3
+)
+
+var SecurityScheme_Type_name = map[int32]string{
+	0: "TYPE_INVALID",
+	1: "TYPE_BASIC",
+	2: "TYPE_API_KEY",
+	3: "TYPE_OAUTH2",
+}
+var SecurityScheme_Type_value = map[string]int32{
+	"TYPE_INVALID": 0,
+	"TYPE_BASIC":   1,
+	"TYPE_API_KEY": 2,
+	"TYPE_OAUTH2":  3,
+}
+
+func (x SecurityScheme_Type) String() string {
+	return proto.EnumName(SecurityScheme_Type_name, int32(x))
+}
+func (SecurityScheme_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{11, 0}
+}
+
+// Required. The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+	SecurityScheme_IN_INVALID SecurityScheme_In = 0
+	SecurityScheme_IN_QUERY   SecurityScheme_In = 1
+	SecurityScheme_IN_HEADER  SecurityScheme_In = 2
+)
+
+var SecurityScheme_In_name = map[int32]string{
+	0: "IN_INVALID",
+	1: "IN_QUERY",
+	2: "IN_HEADER",
+}
+var SecurityScheme_In_value = map[string]int32{
+	"IN_INVALID": 0,
+	"IN_QUERY":   1,
+	"IN_HEADER":  2,
+}
+
+func (x SecurityScheme_In) String() string {
+	return proto.EnumName(SecurityScheme_In_name, int32(x))
+}
+func (SecurityScheme_In) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{11, 1}
+}
+
+// Required. The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+	SecurityScheme_FLOW_INVALID     SecurityScheme_Flow = 0
+	SecurityScheme_FLOW_IMPLICIT    SecurityScheme_Flow = 1
+	SecurityScheme_FLOW_PASSWORD    SecurityScheme_Flow = 2
+	SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+	SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+var SecurityScheme_Flow_name = map[int32]string{
+	0: "FLOW_INVALID",
+	1: "FLOW_IMPLICIT",
+	2: "FLOW_PASSWORD",
+	3: "FLOW_APPLICATION",
+	4: "FLOW_ACCESS_CODE",
+}
+var SecurityScheme_Flow_value = map[string]int32{
+	"FLOW_INVALID":     0,
+	"FLOW_IMPLICIT":    1,
+	"FLOW_PASSWORD":    2,
+	"FLOW_APPLICATION": 3,
+	"FLOW_ACCESS_CODE": 4,
+}
+
+func (x SecurityScheme_Flow) String() string {
+	return proto.EnumName(SecurityScheme_Flow_name, int32(x))
+}
+func (SecurityScheme_Flow) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{11, 2}
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// TODO(ivucica): document fields
+type Swagger struct {
+	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	Info    *Info  `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	Host    string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	// `base_path` is the common prefix path used on all API endpoints (ie. /api, /v1, etc.). By adding this,
+	// it allows you to remove this portion from the path endpoints in your Swagger file making them easier
+	// to read. Note that using `base_path` does not change the endpoint paths that are generated in the resulting
+	// Swagger file. If you wish to use `base_path` with relatively generated Swagger paths, the
+	// `base_path` prefix must be manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath             string                    `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	Schemes              []Swagger_SwaggerScheme   `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_swagger.options.Swagger_SwaggerScheme" json:"schemes,omitempty"`
+	Consumes             []string                  `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	Produces             []string                  `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	Responses            map[string]*Response      `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	SecurityDefinitions  *SecurityDefinitions      `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	Security             []*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	ExternalDocs         *ExternalDocumentation    `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Extensions           map[string]*_struct.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *Swagger) Reset()         { *m = Swagger{} }
+func (m *Swagger) String() string { return proto.CompactTextString(m) }
+func (*Swagger) ProtoMessage()    {}
+func (*Swagger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{0}
+}
+func (m *Swagger) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Swagger.Unmarshal(m, b)
+}
+func (m *Swagger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Swagger.Marshal(b, m, deterministic)
+}
+func (dst *Swagger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Swagger.Merge(dst, src)
+}
+func (m *Swagger) XXX_Size() int {
+	return xxx_messageInfo_Swagger.Size(m)
+}
+func (m *Swagger) XXX_DiscardUnknown() {
+	xxx_messageInfo_Swagger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Swagger proto.InternalMessageInfo
+
+func (m *Swagger) GetSwagger() string {
+	if m != nil {
+		return m.Swagger
+	}
+	return ""
+}
+
+func (m *Swagger) GetInfo() *Info {
+	if m != nil {
+		return m.Info
+	}
+	return nil
+}
+
+func (m *Swagger) GetHost() string {
+	if m != nil {
+		return m.Host
+	}
+	return ""
+}
+
+func (m *Swagger) GetBasePath() string {
+	if m != nil {
+		return m.BasePath
+	}
+	return ""
+}
+
+func (m *Swagger) GetSchemes() []Swagger_SwaggerScheme {
+	if m != nil {
+		return m.Schemes
+	}
+	return nil
+}
+
+func (m *Swagger) GetConsumes() []string {
+	if m != nil {
+		return m.Consumes
+	}
+	return nil
+}
+
+func (m *Swagger) GetProduces() []string {
+	if m != nil {
+		return m.Produces
+	}
+	return nil
+}
+
+func (m *Swagger) GetResponses() map[string]*Response {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+func (m *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+	if m != nil {
+		return m.SecurityDefinitions
+	}
+	return nil
+}
+
+func (m *Swagger) GetSecurity() []*SecurityRequirement {
+	if m != nil {
+		return m.Security
+	}
+	return nil
+}
+
+func (m *Swagger) GetExternalDocs() *ExternalDocumentation {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Swagger) GetExtensions() map[string]*_struct.Value {
+	if m != nil {
+		return m.Extensions
+	}
+	return nil
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// TODO(ivucica): document fields
+type Operation struct {
+	Tags                 []string                  `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	Summary              string                    `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	Description          string                    `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	ExternalDocs         *ExternalDocumentation    `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	OperationId          string                    `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	Consumes             []string                  `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	Produces             []string                  `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	Responses            map[string]*Response      `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Schemes              []string                  `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"`
+	Deprecated           bool                      `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	Security             []*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	Extensions           map[string]*_struct.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *Operation) Reset()         { *m = Operation{} }
+func (m *Operation) String() string { return proto.CompactTextString(m) }
+func (*Operation) ProtoMessage()    {}
+func (*Operation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{1}
+}
+func (m *Operation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Operation.Unmarshal(m, b)
+}
+func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Operation.Marshal(b, m, deterministic)
+}
+func (dst *Operation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Operation.Merge(dst, src)
+}
+func (m *Operation) XXX_Size() int {
+	return xxx_messageInfo_Operation.Size(m)
+}
+func (m *Operation) XXX_DiscardUnknown() {
+	xxx_messageInfo_Operation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Operation proto.InternalMessageInfo
+
+func (m *Operation) GetTags() []string {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
+func (m *Operation) GetSummary() string {
+	if m != nil {
+		return m.Summary
+	}
+	return ""
+}
+
+func (m *Operation) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Operation) GetExternalDocs() *ExternalDocumentation {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Operation) GetOperationId() string {
+	if m != nil {
+		return m.OperationId
+	}
+	return ""
+}
+
+func (m *Operation) GetConsumes() []string {
+	if m != nil {
+		return m.Consumes
+	}
+	return nil
+}
+
+func (m *Operation) GetProduces() []string {
+	if m != nil {
+		return m.Produces
+	}
+	return nil
+}
+
+func (m *Operation) GetResponses() map[string]*Response {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+func (m *Operation) GetSchemes() []string {
+	if m != nil {
+		return m.Schemes
+	}
+	return nil
+}
+
+func (m *Operation) GetDeprecated() bool {
+	if m != nil {
+		return m.Deprecated
+	}
+	return false
+}
+
+func (m *Operation) GetSecurity() []*SecurityRequirement {
+	if m != nil {
+		return m.Security
+	}
+	return nil
+}
+
+func (m *Operation) GetExtensions() map[string]*_struct.Value {
+	if m != nil {
+		return m.Extensions
+	}
+	return nil
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+type Response struct {
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content to the response.
+	Schema               *Schema                   `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	Extensions           map[string]*_struct.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *Response) Reset()         { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()    {}
+func (*Response) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{2}
+}
+func (m *Response) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Response.Unmarshal(m, b)
+}
+func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Response.Marshal(b, m, deterministic)
+}
+func (dst *Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Response.Merge(dst, src)
+}
+func (m *Response) XXX_Size() int {
+	return xxx_messageInfo_Response.Size(m)
+}
+func (m *Response) XXX_DiscardUnknown() {
+	xxx_messageInfo_Response.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Response proto.InternalMessageInfo
+
+func (m *Response) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Response) GetSchema() *Schema {
+	if m != nil {
+		return m.Schema
+	}
+	return nil
+}
+
+func (m *Response) GetExtensions() map[string]*_struct.Value {
+	if m != nil {
+		return m.Extensions
+	}
+	return nil
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// TODO(ivucica): document fields
+type Info struct {
+	Title                string                    `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	Description          string                    `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	TermsOfService       string                    `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	Contact              *Contact                  `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+	License              *License                  `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+	Version              string                    `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+	Extensions           map[string]*_struct.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *Info) Reset()         { *m = Info{} }
+func (m *Info) String() string { return proto.CompactTextString(m) }
+func (*Info) ProtoMessage()    {}
+func (*Info) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{3}
+}
+func (m *Info) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Info.Unmarshal(m, b)
+}
+func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Info.Marshal(b, m, deterministic)
+}
+func (dst *Info) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Info.Merge(dst, src)
+}
+func (m *Info) XXX_Size() int {
+	return xxx_messageInfo_Info.Size(m)
+}
+func (m *Info) XXX_DiscardUnknown() {
+	xxx_messageInfo_Info.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Info proto.InternalMessageInfo
+
+func (m *Info) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *Info) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Info) GetTermsOfService() string {
+	if m != nil {
+		return m.TermsOfService
+	}
+	return ""
+}
+
+func (m *Info) GetContact() *Contact {
+	if m != nil {
+		return m.Contact
+	}
+	return nil
+}
+
+func (m *Info) GetLicense() *License {
+	if m != nil {
+		return m.License
+	}
+	return nil
+}
+
+func (m *Info) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Info) GetExtensions() map[string]*_struct.Value {
+	if m != nil {
+		return m.Extensions
+	}
+	return nil
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// TODO(ivucica): document fields
+type Contact struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Url                  string   `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	Email                string   `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Contact) Reset()         { *m = Contact{} }
+func (m *Contact) String() string { return proto.CompactTextString(m) }
+func (*Contact) ProtoMessage()    {}
+func (*Contact) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{4}
+}
+func (m *Contact) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Contact.Unmarshal(m, b)
+}
+func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Contact.Marshal(b, m, deterministic)
+}
+func (dst *Contact) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Contact.Merge(dst, src)
+}
+func (m *Contact) XXX_Size() int {
+	return xxx_messageInfo_Contact.Size(m)
+}
+func (m *Contact) XXX_DiscardUnknown() {
+	xxx_messageInfo_Contact.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Contact proto.InternalMessageInfo
+
+func (m *Contact) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Contact) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *Contact) GetEmail() string {
+	if m != nil {
+		return m.Email
+	}
+	return ""
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+type License struct {
+	// Required. The license name used for the API.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A URL to the license used for the API.
+	Url                  string   `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *License) Reset()         { *m = License{} }
+func (m *License) String() string { return proto.CompactTextString(m) }
+func (*License) ProtoMessage()    {}
+func (*License) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{5}
+}
+func (m *License) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_License.Unmarshal(m, b)
+}
+func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_License.Marshal(b, m, deterministic)
+}
+func (dst *License) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_License.Merge(dst, src)
+}
+func (m *License) XXX_Size() int {
+	return xxx_messageInfo_License.Size(m)
+}
+func (m *License) XXX_DiscardUnknown() {
+	xxx_messageInfo_License.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_License proto.InternalMessageInfo
+
+func (m *License) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *License) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// TODO(ivucica): document fields
+type ExternalDocumentation struct {
+	Description          string   `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Url                  string   `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ExternalDocumentation) Reset()         { *m = ExternalDocumentation{} }
+func (m *ExternalDocumentation) String() string { return proto.CompactTextString(m) }
+func (*ExternalDocumentation) ProtoMessage()    {}
+func (*ExternalDocumentation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{6}
+}
+func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExternalDocumentation.Unmarshal(m, b)
+}
+func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExternalDocumentation.Marshal(b, m, deterministic)
+}
+func (dst *ExternalDocumentation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExternalDocumentation.Merge(dst, src)
+}
+func (m *ExternalDocumentation) XXX_Size() int {
+	return xxx_messageInfo_ExternalDocumentation.Size(m)
+}
+func (m *ExternalDocumentation) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo
+
+func (m *ExternalDocumentation) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *ExternalDocumentation) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// TODO(ivucica): document fields
+type Schema struct {
+	JsonSchema           *JSONSchema            `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+	Discriminator        string                 `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	ReadOnly             bool                   `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	ExternalDocs         *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example              *any.Any               `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *Schema) Reset()         { *m = Schema{} }
+func (m *Schema) String() string { return proto.CompactTextString(m) }
+func (*Schema) ProtoMessage()    {}
+func (*Schema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{7}
+}
+func (m *Schema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Schema.Unmarshal(m, b)
+}
+func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
+}
+func (dst *Schema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Schema.Merge(dst, src)
+}
+func (m *Schema) XXX_Size() int {
+	return xxx_messageInfo_Schema.Size(m)
+}
+func (m *Schema) XXX_DiscardUnknown() {
+	xxx_messageInfo_Schema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Schema proto.InternalMessageInfo
+
+func (m *Schema) GetJsonSchema() *JSONSchema {
+	if m != nil {
+		return m.JsonSchema
+	}
+	return nil
+}
+
+func (m *Schema) GetDiscriminator() string {
+	if m != nil {
+		return m.Discriminator
+	}
+	return ""
+}
+
+func (m *Schema) GetReadOnly() bool {
+	if m != nil {
+		return m.ReadOnly
+	}
+	return false
+}
+
+func (m *Schema) GetExternalDocs() *ExternalDocumentation {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Schema) GetExample() *any.Any {
+	if m != nil {
+		return m.Example
+	}
+	return nil
+}
+
+// `JSONSchema` represents properties from JSON Schema taken, and as used, in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// TODO(ivucica): document fields
+type JSONSchema struct {
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must be imported
+	// into the protofile. If no message is identified, the Ref will be used verbatim in
+	// the output.
+	// For example:
+	//  `ref: ".google.protobuf.Timestamp"`.
+	Ref              string   `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	Title            string   `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	Description      string   `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	Default          string   `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	ReadOnly         bool     `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	MultipleOf       float64  `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	Maximum          float64  `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool     `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum          float64  `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool     `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength        uint64   `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength        uint64   `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern          string   `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems         uint64   `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems         uint64   `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems      bool     `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	MaxProperties    uint64   `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	MinProperties    uint64   `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	Required         []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	// Items in 'array' must be unique.
+	Array                []string                           `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	Type                 []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_swagger.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *JSONSchema) Reset()         { *m = JSONSchema{} }
+func (m *JSONSchema) String() string { return proto.CompactTextString(m) }
+func (*JSONSchema) ProtoMessage()    {}
+func (*JSONSchema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{8}
+}
+func (m *JSONSchema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_JSONSchema.Unmarshal(m, b)
+}
+func (m *JSONSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_JSONSchema.Marshal(b, m, deterministic)
+}
+func (dst *JSONSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_JSONSchema.Merge(dst, src)
+}
+func (m *JSONSchema) XXX_Size() int {
+	return xxx_messageInfo_JSONSchema.Size(m)
+}
+func (m *JSONSchema) XXX_DiscardUnknown() {
+	xxx_messageInfo_JSONSchema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JSONSchema proto.InternalMessageInfo
+
+func (m *JSONSchema) GetRef() string {
+	if m != nil {
+		return m.Ref
+	}
+	return ""
+}
+
+func (m *JSONSchema) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *JSONSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *JSONSchema) GetDefault() string {
+	if m != nil {
+		return m.Default
+	}
+	return ""
+}
+
+func (m *JSONSchema) GetReadOnly() bool {
+	if m != nil {
+		return m.ReadOnly
+	}
+	return false
+}
+
+func (m *JSONSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *JSONSchema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *JSONSchema) GetMaxLength() uint64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetMinLength() uint64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *JSONSchema) GetMaxItems() uint64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetMinItems() uint64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *JSONSchema) GetMaxProperties() uint64 {
+	if m != nil {
+		return m.MaxProperties
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetMinProperties() uint64 {
+	if m != nil {
+		return m.MinProperties
+	}
+	return 0
+}
+
+func (m *JSONSchema) GetRequired() []string {
+	if m != nil {
+		return m.Required
+	}
+	return nil
+}
+
+func (m *JSONSchema) GetArray() []string {
+	if m != nil {
+		return m.Array
+	}
+	return nil
+}
+
+func (m *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+// TODO(ivucica): document fields
+type Tag struct {
+	// TODO(ivucica): Description should be extracted from comments on the proto
+	// service object.
+	Description          string                 `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	ExternalDocs         *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *Tag) Reset()         { *m = Tag{} }
+func (m *Tag) String() string { return proto.CompactTextString(m) }
+func (*Tag) ProtoMessage()    {}
+func (*Tag) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{9}
+}
+func (m *Tag) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Tag.Unmarshal(m, b)
+}
+func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Tag.Marshal(b, m, deterministic)
+}
+func (dst *Tag) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Tag.Merge(dst, src)
+}
+func (m *Tag) XXX_Size() int {
+	return xxx_messageInfo_Tag.Size(m)
+}
+func (m *Tag) XXX_DiscardUnknown() {
+	xxx_messageInfo_Tag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Tag proto.InternalMessageInfo
+
+func (m *Tag) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Tag) GetExternalDocs() *ExternalDocumentation {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+	// A single security scheme definition, mapping a "name" to the scheme it defines.
+	Security             map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SecurityDefinitions) Reset()         { *m = SecurityDefinitions{} }
+func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) }
+func (*SecurityDefinitions) ProtoMessage()    {}
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{10}
+}
+func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b)
+}
+func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic)
+}
+func (dst *SecurityDefinitions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityDefinitions.Merge(dst, src)
+}
+func (m *SecurityDefinitions) XXX_Size() int {
+	return xxx_messageInfo_SecurityDefinitions.Size(m)
+}
+func (m *SecurityDefinitions) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo
+
+func (m *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+	if m != nil {
+		return m.Security
+	}
+	return nil
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+	// Required. The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_swagger.options.SecurityScheme_Type" json:"type,omitempty"`
+	// A short description for security scheme.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// Required. The name of the header or query parameter to be used.
+	//
+	// Valid for apiKey.
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	// Required. The location of the API key. Valid values are "query" or "header".
+	//
+	// Valid for apiKey.
+	In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_swagger.options.SecurityScheme_In" json:"in,omitempty"`
+	// Required. The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	//
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_swagger.options.SecurityScheme_Flow" json:"flow,omitempty"`
+	// Required. The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	//
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	// Required. The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	//
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	// Required. The available scopes for the OAuth2 security scheme.
+	//
+	// Valid for oauth2.
+	Scopes               *Scopes                   `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	Extensions           map[string]*_struct.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *SecurityScheme) Reset()         { *m = SecurityScheme{} }
+func (m *SecurityScheme) String() string { return proto.CompactTextString(m) }
+func (*SecurityScheme) ProtoMessage()    {}
+func (*SecurityScheme) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{11}
+}
+func (m *SecurityScheme) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SecurityScheme.Unmarshal(m, b)
+}
+func (m *SecurityScheme) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SecurityScheme.Marshal(b, m, deterministic)
+}
+func (dst *SecurityScheme) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityScheme.Merge(dst, src)
+}
+func (m *SecurityScheme) XXX_Size() int {
+	return xxx_messageInfo_SecurityScheme.Size(m)
+}
+func (m *SecurityScheme) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityScheme.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityScheme proto.InternalMessageInfo
+
+func (m *SecurityScheme) GetType() SecurityScheme_Type {
+	if m != nil {
+		return m.Type
+	}
+	return SecurityScheme_TYPE_INVALID
+}
+
+func (m *SecurityScheme) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *SecurityScheme) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *SecurityScheme) GetIn() SecurityScheme_In {
+	if m != nil {
+		return m.In
+	}
+	return SecurityScheme_IN_INVALID
+}
+
+func (m *SecurityScheme) GetFlow() SecurityScheme_Flow {
+	if m != nil {
+		return m.Flow
+	}
+	return SecurityScheme_FLOW_INVALID
+}
+
+func (m *SecurityScheme) GetAuthorizationUrl() string {
+	if m != nil {
+		return m.AuthorizationUrl
+	}
+	return ""
+}
+
+func (m *SecurityScheme) GetTokenUrl() string {
+	if m != nil {
+		return m.TokenUrl
+	}
+	return ""
+}
+
+func (m *SecurityScheme) GetScopes() *Scopes {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+func (m *SecurityScheme) GetExtensions() map[string]*_struct.Value {
+	if m != nil {
+		return m.Extensions
+	}
+	return nil
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement  map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                                                 `json:"-"`
+	XXX_unrecognized     []byte                                                   `json:"-"`
+	XXX_sizecache        int32                                                    `json:"-"`
+}
+
+func (m *SecurityRequirement) Reset()         { *m = SecurityRequirement{} }
+func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) }
+func (*SecurityRequirement) ProtoMessage()    {}
+func (*SecurityRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{12}
+}
+func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b)
+}
+func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic)
+}
+func (dst *SecurityRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityRequirement.Merge(dst, src)
+}
+func (m *SecurityRequirement) XXX_Size() int {
+	return xxx_messageInfo_SecurityRequirement.Size(m)
+}
+func (m *SecurityRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo
+
+func (m *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if m != nil {
+		return m.SecurityRequirement
+	}
+	return nil
+}
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	Scope                []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*m = SecurityRequirement_SecurityRequirementValue{}
+}
+func (m *SecurityRequirement_SecurityRequirementValue) String() string {
+	return proto.CompactTextString(m)
+}
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+func (*SecurityRequirement_SecurityRequirementValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{12, 0}
+}
+func (m *SecurityRequirement_SecurityRequirementValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SecurityRequirement_SecurityRequirementValue.Unmarshal(m, b)
+}
+func (m *SecurityRequirement_SecurityRequirementValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SecurityRequirement_SecurityRequirementValue.Marshal(b, m, deterministic)
+}
+func (dst *SecurityRequirement_SecurityRequirementValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityRequirement_SecurityRequirementValue.Merge(dst, src)
+}
+func (m *SecurityRequirement_SecurityRequirementValue) XXX_Size() int {
+	return xxx_messageInfo_SecurityRequirement_SecurityRequirementValue.Size(m)
+}
+func (m *SecurityRequirement_SecurityRequirementValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityRequirement_SecurityRequirementValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityRequirement_SecurityRequirementValue proto.InternalMessageInfo
+
+func (m *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if m != nil {
+		return m.Scope
+	}
+	return nil
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	// Maps between a name of a scope to a short description of it (as the value
+	// of the property).
+	Scope                map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Scopes) Reset()         { *m = Scopes{} }
+func (m *Scopes) String() string { return proto.CompactTextString(m) }
+func (*Scopes) ProtoMessage()    {}
+func (*Scopes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_openapiv2_7182f700aabb5117, []int{13}
+}
+func (m *Scopes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Scopes.Unmarshal(m, b)
+}
+func (m *Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Scopes.Marshal(b, m, deterministic)
+}
+func (dst *Scopes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Scopes.Merge(dst, src)
+}
+func (m *Scopes) XXX_Size() int {
+	return xxx_messageInfo_Scopes.Size(m)
+}
+func (m *Scopes) XXX_DiscardUnknown() {
+	xxx_messageInfo_Scopes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Scopes proto.InternalMessageInfo
+
+func (m *Scopes) GetScope() map[string]string {
+	if m != nil {
+		return m.Scope
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Swagger)(nil), "grpc.gateway.protoc_gen_swagger.options.Swagger")
+	proto.RegisterMapType((map[string]*_struct.Value)(nil), "grpc.gateway.protoc_gen_swagger.options.Swagger.ExtensionsEntry")
+	proto.RegisterMapType((map[string]*Response)(nil), "grpc.gateway.protoc_gen_swagger.options.Swagger.ResponsesEntry")
+	proto.RegisterType((*Operation)(nil), "grpc.gateway.protoc_gen_swagger.options.Operation")
+	proto.RegisterMapType((map[string]*_struct.Value)(nil), "grpc.gateway.protoc_gen_swagger.options.Operation.ExtensionsEntry")
+	proto.RegisterMapType((map[string]*Response)(nil), "grpc.gateway.protoc_gen_swagger.options.Operation.ResponsesEntry")
+	proto.RegisterType((*Response)(nil), "grpc.gateway.protoc_gen_swagger.options.Response")
+	proto.RegisterMapType((map[string]*_struct.Value)(nil), "grpc.gateway.protoc_gen_swagger.options.Response.ExtensionsEntry")
+	proto.RegisterType((*Info)(nil), "grpc.gateway.protoc_gen_swagger.options.Info")
+	proto.RegisterMapType((map[string]*_struct.Value)(nil), "grpc.gateway.protoc_gen_swagger.options.Info.ExtensionsEntry")
+	proto.RegisterType((*Contact)(nil), "grpc.gateway.protoc_gen_swagger.options.Contact")
+	proto.RegisterType((*License)(nil), "grpc.gateway.protoc_gen_swagger.options.License")
+	proto.RegisterType((*ExternalDocumentation)(nil), "grpc.gateway.protoc_gen_swagger.options.ExternalDocumentation")
+	proto.RegisterType((*Schema)(nil), "grpc.gateway.protoc_gen_swagger.options.Schema")
+	proto.RegisterType((*JSONSchema)(nil), "grpc.gateway.protoc_gen_swagger.options.JSONSchema")
+	proto.RegisterType((*Tag)(nil), "grpc.gateway.protoc_gen_swagger.options.Tag")
+	proto.RegisterType((*SecurityDefinitions)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityDefinitions")
+	proto.RegisterMapType((map[string]*SecurityScheme)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityDefinitions.SecurityEntry")
+	proto.RegisterType((*SecurityScheme)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityScheme")
+	proto.RegisterMapType((map[string]*_struct.Value)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityScheme.ExtensionsEntry")
+	proto.RegisterType((*SecurityRequirement)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityRequirement")
+	proto.RegisterMapType((map[string]*SecurityRequirement_SecurityRequirementValue)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityRequirement.SecurityRequirementEntry")
+	proto.RegisterType((*SecurityRequirement_SecurityRequirementValue)(nil), "grpc.gateway.protoc_gen_swagger.options.SecurityRequirement.SecurityRequirementValue")
+	proto.RegisterType((*Scopes)(nil), "grpc.gateway.protoc_gen_swagger.options.Scopes")
+	proto.RegisterMapType((map[string]string)(nil), "grpc.gateway.protoc_gen_swagger.options.Scopes.ScopeEntry")
+	proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.Swagger_SwaggerScheme", Swagger_SwaggerScheme_name, Swagger_SwaggerScheme_value)
+	proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.JSONSchema_JSONSchemaSimpleTypes", JSONSchema_JSONSchemaSimpleTypes_name, JSONSchema_JSONSchemaSimpleTypes_value)
+	proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.SecurityScheme_Type", SecurityScheme_Type_name, SecurityScheme_Type_value)
+	proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.SecurityScheme_In", SecurityScheme_In_name, SecurityScheme_In_value)
+	proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.SecurityScheme_Flow", SecurityScheme_Flow_name, SecurityScheme_Flow_value)
+}
+
+func init() {
+	proto.RegisterFile("protoc-gen-swagger/options/openapiv2.proto", fileDescriptor_openapiv2_7182f700aabb5117)
+}
+
+var fileDescriptor_openapiv2_7182f700aabb5117 = []byte{
+	// 1884 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x5b, 0x73, 0xdb, 0xc6,
+	0xf5, 0x0f, 0x48, 0x90, 0x04, 0x0f, 0x45, 0x7a, 0xbd, 0x96, 0xf3, 0x47, 0x18, 0xdb, 0x7f, 0x85,
+	0x4d, 0xa7, 0x1a, 0xbb, 0xa6, 0x12, 0xe5, 0xa1, 0x99, 0x4c, 0x6f, 0x94, 0xc4, 0xc8, 0x80, 0x65,
+	0x92, 0x05, 0xa9, 0x28, 0xee, 0x8c, 0x07, 0x85, 0xc0, 0x25, 0x85, 0x18, 0x17, 0x06, 0x17, 0x49,
+	0xec, 0x27, 0xe8, 0x73, 0xa7, 0xaf, 0xf9, 0x1e, 0x9d, 0x69, 0x9f, 0xfa, 0x09, 0xfa, 0x59, 0xda,
+	0xe9, 0x7b, 0x67, 0x2f, 0x20, 0x41, 0x91, 0xf1, 0x90, 0x72, 0x3c, 0x79, 0xe8, 0x13, 0xf7, 0xdc,
+	0x7e, 0xbb, 0x7b, 0xce, 0x9e, 0x0b, 0x08, 0x8f, 0x27, 0x61, 0x10, 0x07, 0xf6, 0xd3, 0x31, 0xf1,
+	0x9f, 0x46, 0x57, 0xd6, 0x78, 0x4c, 0xc2, 0xbd, 0x60, 0x12, 0x3b, 0x81, 0x1f, 0xed, 0x05, 0x13,
+	0xe2, 0x5b, 0x13, 0xe7, 0x72, 0xbf, 0xc9, 0x94, 0xf0, 0xcf, 0xc6, 0xe1, 0xc4, 0x6e, 0x8e, 0xad,
+	0x98, 0x5c, 0x59, 0x53, 0xce, 0xb3, 0xcd, 0x31, 0xf1, 0x4d, 0x61, 0xd8, 0x14, 0x86, 0xf5, 0x0f,
+	0xc6, 0x41, 0x30, 0x76, 0xc9, 0x1e, 0x53, 0x39, 0x4f, 0x46, 0x7b, 0x96, 0x2f, 0xf4, 0xeb, 0x0f,
+	0x6e, 0x8a, 0xa2, 0x38, 0x4c, 0xec, 0x98, 0x4b, 0x1b, 0x7f, 0x55, 0xa0, 0xd4, 0xe7, 0x60, 0x58,
+	0x85, 0x92, 0xc0, 0x55, 0xa5, 0x1d, 0x69, 0xb7, 0x6c, 0xa4, 0x24, 0x6e, 0x81, 0xec, 0xf8, 0xa3,
+	0x40, 0xcd, 0xed, 0x48, 0xbb, 0x95, 0xfd, 0xa7, 0xcd, 0x35, 0x8f, 0xd5, 0xd4, 0xfc, 0x51, 0x60,
+	0x30, 0x53, 0x8c, 0x41, 0xbe, 0x08, 0xa2, 0x58, 0xcd, 0x33, 0x64, 0xb6, 0xc6, 0x1f, 0x42, 0xf9,
+	0xdc, 0x8a, 0x88, 0x39, 0xb1, 0xe2, 0x0b, 0x55, 0x66, 0x02, 0x85, 0x32, 0x7a, 0x56, 0x7c, 0x81,
+	0xbf, 0x86, 0x52, 0x64, 0x5f, 0x10, 0x8f, 0x44, 0x6a, 0x61, 0x27, 0xbf, 0x5b, 0xdb, 0xff, 0xf5,
+	0xda, 0xdb, 0x8a, 0x0b, 0xa5, 0xbf, 0x7d, 0x06, 0x63, 0xa4, 0x70, 0xb8, 0x0e, 0x8a, 0x1d, 0xf8,
+	0x51, 0x42, 0xa1, 0x8b, 0x3b, 0x79, 0xba, 0x6b, 0x4a, 0x53, 0xd9, 0x24, 0x0c, 0x86, 0x89, 0x4d,
+	0x22, 0xb5, 0xc4, 0x65, 0x29, 0x8d, 0x5f, 0x41, 0x39, 0x24, 0xd1, 0x24, 0xf0, 0x23, 0x12, 0xa9,
+	0xb0, 0x93, 0xdf, 0xad, 0xec, 0xff, 0x66, 0xe3, 0x33, 0x19, 0x29, 0x42, 0xdb, 0x8f, 0xc3, 0xa9,
+	0x31, 0x47, 0xc4, 0x01, 0x6c, 0x47, 0xc4, 0x4e, 0x42, 0x27, 0x9e, 0x9a, 0x43, 0x32, 0x72, 0x7c,
+	0x87, 0x59, 0xaa, 0x15, 0xe6, 0xf4, 0x5f, 0xae, 0xbf, 0x93, 0x00, 0x39, 0x9a, 0x63, 0x18, 0xf7,
+	0xa2, 0x65, 0x26, 0xfe, 0x1a, 0x94, 0x94, 0xad, 0x6e, 0xb1, 0xeb, 0x6c, 0xbe, 0x89, 0x41, 0xbe,
+	0x4d, 0x9c, 0x90, 0x78, 0xc4, 0x8f, 0x8d, 0x19, 0x1a, 0xb6, 0xa1, 0x4a, 0xae, 0x63, 0x12, 0xfa,
+	0x96, 0x6b, 0x0e, 0x03, 0x3b, 0x52, 0x6b, 0xec, 0x0e, 0xeb, 0x47, 0xb0, 0x2d, 0xac, 0x8f, 0x02,
+	0x3b, 0xa1, 0xd8, 0x16, 0x65, 0x1b, 0x5b, 0x64, 0xce, 0x8e, 0xf0, 0x1f, 0x00, 0x28, 0xed, 0x47,
+	0xcc, 0x4b, 0x77, 0xd8, 0x05, 0x7e, 0xbb, 0x71, 0x3c, 0xda, 0x33, 0x08, 0x1e, 0x90, 0x0c, 0x66,
+	0x3d, 0x80, 0xda, 0x62, 0xb8, 0x30, 0x82, 0xfc, 0x6b, 0x32, 0x15, 0xe9, 0x41, 0x97, 0xf8, 0x18,
+	0x0a, 0x97, 0x96, 0x9b, 0x10, 0x91, 0x1b, 0x9f, 0xae, 0x7d, 0x80, 0x14, 0xd9, 0xe0, 0xf6, 0x5f,
+	0xe4, 0x3e, 0x97, 0xea, 0xa7, 0x70, 0xe7, 0xc6, 0x79, 0x56, 0xec, 0xf8, 0xf3, 0xc5, 0x1d, 0xdf,
+	0x6f, 0xf2, 0x04, 0x6f, 0xa6, 0x09, 0xde, 0xfc, 0x8a, 0x4a, 0x33, 0xb0, 0x8d, 0x03, 0xa8, 0x2e,
+	0xa4, 0x02, 0xae, 0x40, 0xe9, 0xb4, 0xf3, 0xbc, 0xd3, 0x3d, 0xeb, 0xa0, 0xf7, 0xb0, 0x02, 0xf2,
+	0xb3, 0xc1, 0xa0, 0x87, 0x24, 0x5c, 0x86, 0x02, 0x5d, 0xf5, 0x51, 0x0e, 0x17, 0x21, 0x77, 0xd6,
+	0x47, 0x79, 0x5c, 0x82, 0xfc, 0x59, 0xbf, 0x8f, 0x64, 0x5d, 0x56, 0x14, 0x54, 0xd6, 0x65, 0xa5,
+	0x8c, 0x40, 0x97, 0x95, 0x2a, 0xaa, 0x35, 0xfe, 0x51, 0x84, 0x72, 0x77, 0x42, 0x42, 0x16, 0x1b,
+	0x9a, 0xdf, 0xb1, 0x35, 0x8e, 0x54, 0x89, 0x25, 0x0d, 0x5b, 0xb3, 0x82, 0x92, 0x78, 0x9e, 0x15,
+	0x4e, 0xd9, 0x59, 0x69, 0x41, 0xe1, 0x24, 0xde, 0x81, 0xca, 0x90, 0x44, 0x76, 0xe8, 0x30, 0x67,
+	0x88, 0xa2, 0x90, 0x65, 0x2d, 0x3f, 0x21, 0xf9, 0x1d, 0x3c, 0xa1, 0x8f, 0x60, 0x2b, 0x48, 0x6f,
+	0x60, 0x3a, 0x43, 0xb5, 0xc0, 0xcf, 0x31, 0xe3, 0x69, 0xc3, 0x5b, 0x17, 0x0b, 0x33, 0x5b, 0x2c,
+	0xca, 0xec, 0x71, 0xb6, 0xd6, 0x3e, 0xfb, 0xcc, 0xad, 0x6f, 0x28, 0x17, 0xea, 0xbc, 0x3e, 0x02,
+	0xdb, 0x7b, 0x56, 0xdf, 0x1e, 0x01, 0x0c, 0xc9, 0x24, 0x24, 0xb6, 0x15, 0x93, 0x21, 0x2b, 0x1f,
+	0x8a, 0x91, 0xe1, 0xbc, 0xc3, 0xbc, 0x3f, 0x5f, 0x48, 0xc9, 0x2a, 0xc3, 0x3e, 0xb8, 0xc5, 0xad,
+	0xff, 0x07, 0x92, 0x92, 0x27, 0x54, 0xe3, 0x6f, 0x39, 0x50, 0xd2, 0x4d, 0x6f, 0x66, 0x85, 0xb4,
+	0x9c, 0x15, 0xc7, 0x50, 0x64, 0x51, 0xb6, 0xc4, 0x3e, 0x7b, 0xeb, 0x07, 0x8e, 0x99, 0x19, 0xc2,
+	0x1c, 0x5b, 0x0b, 0x91, 0x2a, 0x6c, 0xf8, 0x3e, 0xd3, 0x13, 0xbf, 0x31, 0x50, 0xef, 0xcc, 0x6f,
+	0x79, 0x56, 0x8e, 0x64, 0x54, 0x68, 0xfc, 0x33, 0x0f, 0x32, 0x9d, 0x31, 0xf0, 0x36, 0x14, 0x62,
+	0x27, 0x76, 0x89, 0x80, 0xe6, 0xc4, 0x4d, 0x7f, 0xe6, 0x96, 0xfd, 0xb9, 0x0b, 0x28, 0x26, 0xa1,
+	0x17, 0x99, 0xc1, 0xc8, 0x8c, 0x48, 0x78, 0xe9, 0xd8, 0x44, 0x14, 0xa3, 0x1a, 0xe3, 0x77, 0x47,
+	0x7d, 0xce, 0xc5, 0x3a, 0x94, 0xec, 0xc0, 0x8f, 0x2d, 0x3b, 0x16, 0x95, 0xe8, 0x93, 0xb5, 0xbd,
+	0x75, 0xc8, 0xed, 0x8c, 0x14, 0x80, 0x62, 0xb9, 0x8e, 0x4d, 0xfc, 0x88, 0xb0, 0x8a, 0xb3, 0x09,
+	0xd6, 0x09, 0xb7, 0x33, 0x52, 0x00, 0x5a, 0x06, 0x2e, 0x49, 0x48, 0x7d, 0xac, 0x16, 0x79, 0x8d,
+	0x15, 0x24, 0x7e, 0xb5, 0x10, 0xe2, 0x12, 0x0b, 0xf1, 0xaf, 0x36, 0x1a, 0xdd, 0x7e, 0x84, 0xf0,
+	0x36, 0xda, 0x50, 0x12, 0xfe, 0xa2, 0x2d, 0xc5, 0xb7, 0xbc, 0x34, 0xa6, 0x6c, 0x4d, 0xb7, 0x48,
+	0x42, 0x57, 0x84, 0x92, 0x2e, 0x69, 0xe8, 0x89, 0x67, 0x39, 0xae, 0x88, 0x1b, 0x27, 0x1a, 0x7b,
+	0x50, 0x12, 0xae, 0x5a, 0x0f, 0xa6, 0xf1, 0x1c, 0xee, 0xaf, 0xec, 0x18, 0x6b, 0x24, 0xe5, 0x32,
+	0xd8, 0xdf, 0x73, 0x50, 0xe4, 0x09, 0x87, 0x07, 0x50, 0xf9, 0x26, 0x0a, 0x7c, 0x53, 0xa4, 0xad,
+	0xc4, 0xfc, 0xf0, 0xd9, 0xda, 0x61, 0xd0, 0xfb, 0xdd, 0x8e, 0x48, 0x5d, 0xa0, 0x38, 0x02, 0xf5,
+	0x63, 0xa8, 0x0e, 0x1d, 0x7a, 0x02, 0xcf, 0xf1, 0xad, 0x38, 0x08, 0xc5, 0xe6, 0x8b, 0x4c, 0x3a,
+	0x5f, 0x87, 0xc4, 0x1a, 0x9a, 0x81, 0xef, 0x4e, 0x99, 0x7b, 0x14, 0x43, 0xa1, 0x8c, 0xae, 0xef,
+	0xae, 0x98, 0xd1, 0x0a, 0xef, 0xa0, 0xc1, 0x36, 0xa1, 0x44, 0xae, 0x2d, 0x6f, 0xe2, 0x12, 0xf6,
+	0x3a, 0x2b, 0xfb, 0xdb, 0x4b, 0x2f, 0xa0, 0xe5, 0x4f, 0x8d, 0x54, 0x49, 0xa4, 0xf5, 0x77, 0x25,
+	0x80, 0xf9, 0xc5, 0xa9, 0x7f, 0x43, 0x32, 0x12, 0xf1, 0xa5, 0xcb, 0x79, 0xba, 0x17, 0xde, 0x90,
+	0xee, 0xc5, 0xe5, 0x48, 0xa9, 0x50, 0x1a, 0x92, 0x91, 0x95, 0xb8, 0xb1, 0x5a, 0xe2, 0xc9, 0x22,
+	0xc8, 0x45, 0x57, 0x29, 0x37, 0x5c, 0xf5, 0xff, 0x50, 0xf1, 0x12, 0x37, 0x76, 0x26, 0x2e, 0x31,
+	0x83, 0x91, 0x0a, 0x3b, 0xd2, 0xae, 0x64, 0x40, 0xca, 0xea, 0x8e, 0x28, 0xae, 0x67, 0x5d, 0x3b,
+	0x5e, 0xe2, 0xb1, 0x76, 0x2b, 0x19, 0x29, 0x89, 0x9f, 0xc0, 0x5d, 0x72, 0x6d, 0xbb, 0x49, 0xe4,
+	0x5c, 0x12, 0x33, 0xd5, 0xd9, 0x62, 0xf8, 0x68, 0x26, 0x78, 0x21, 0x94, 0x29, 0x8c, 0xe3, 0x33,
+	0x95, 0xaa, 0x80, 0xe1, 0xe4, 0x0d, 0x18, 0xa1, 0x53, 0xbb, 0x09, 0x23, 0x94, 0x1f, 0x02, 0x78,
+	0xd6, 0xb5, 0xe9, 0x12, 0x7f, 0x1c, 0x5f, 0xa8, 0x77, 0x76, 0xa4, 0x5d, 0xd9, 0x28, 0x7b, 0xd6,
+	0xf5, 0x09, 0x63, 0x30, 0xb1, 0xe3, 0xa7, 0x62, 0x24, 0xc4, 0x8e, 0x2f, 0xc4, 0x2a, 0x94, 0x26,
+	0x56, 0x4c, 0x63, 0xa8, 0xde, 0xe5, 0x3e, 0x12, 0x24, 0xf5, 0x11, 0xc5, 0x75, 0x62, 0xe2, 0x45,
+	0xea, 0x36, 0xb3, 0x53, 0x3c, 0xeb, 0x5a, 0xa3, 0x34, 0x13, 0x3a, 0xbe, 0x10, 0xde, 0x17, 0x42,
+	0xc7, 0xe7, 0xc2, 0x8f, 0x60, 0x2b, 0xf1, 0x9d, 0x6f, 0x13, 0x22, 0xe4, 0xef, 0xb3, 0x93, 0x57,
+	0x38, 0x8f, 0xab, 0xfc, 0x14, 0x6a, 0x14, 0x7c, 0x12, 0xd2, 0xe1, 0x2b, 0x76, 0x48, 0xa4, 0xaa,
+	0x0c, 0xa4, 0xea, 0x59, 0xd7, 0xbd, 0x19, 0x93, 0xa9, 0x39, 0x7e, 0x56, 0xed, 0x03, 0xa1, 0xe6,
+	0xf8, 0x19, 0xb5, 0x3a, 0x28, 0x21, 0x9f, 0x50, 0x86, 0x6a, 0x9d, 0x4f, 0x66, 0x29, 0x4d, 0x1f,
+	0x8f, 0x15, 0x86, 0xd6, 0x54, 0x6d, 0x30, 0x01, 0x27, 0xf0, 0x2b, 0x90, 0xe3, 0xe9, 0x84, 0xa8,
+	0x3f, 0x61, 0xdf, 0x9a, 0xda, 0x2d, 0x12, 0x34, 0xb3, 0xec, 0x3b, 0xf4, 0x35, 0x0f, 0xa6, 0x13,
+	0x12, 0x19, 0x0c, 0xb6, 0x71, 0x05, 0xf7, 0x57, 0x8a, 0x17, 0x47, 0xf1, 0x32, 0x14, 0x5a, 0x86,
+	0xd1, 0x7a, 0x89, 0x24, 0xca, 0x3f, 0xe8, 0x76, 0x4f, 0xda, 0xad, 0x0e, 0xca, 0x51, 0x42, 0xeb,
+	0x0c, 0xda, 0xc7, 0x6d, 0x03, 0xe5, 0xe9, 0xbc, 0xde, 0x39, 0x3d, 0x39, 0x41, 0x32, 0x06, 0x28,
+	0x76, 0x4e, 0x5f, 0x1c, 0xb4, 0x0d, 0x54, 0xa0, 0xeb, 0xee, 0x81, 0xde, 0x3e, 0x1c, 0xa0, 0x22,
+	0x5d, 0xf7, 0x07, 0x86, 0xd6, 0x39, 0x46, 0x25, 0x5d, 0x56, 0x24, 0x94, 0xd3, 0x65, 0x25, 0x87,
+	0xf2, 0x3c, 0xbb, 0x66, 0x33, 0x3c, 0x46, 0xf7, 0x74, 0x59, 0xb9, 0x87, 0xb6, 0x75, 0x59, 0xf9,
+	0x3f, 0xa4, 0xea, 0xb2, 0xf2, 0x21, 0x7a, 0xa0, 0xcb, 0xca, 0x03, 0xf4, 0x50, 0x97, 0x95, 0x87,
+	0xe8, 0x91, 0x2e, 0x2b, 0x8f, 0x50, 0x43, 0x97, 0x95, 0x8f, 0xd1, 0x63, 0x5d, 0x56, 0x1e, 0xa3,
+	0x27, 0xba, 0xac, 0x3c, 0x41, 0xcd, 0xc6, 0x9f, 0x25, 0xc8, 0x0f, 0xac, 0xf1, 0x1a, 0xfd, 0x75,
+	0xa9, 0xc8, 0xe4, 0x7f, 0xf8, 0x22, 0xc3, 0xaf, 0xd8, 0xf8, 0xb7, 0x04, 0xf7, 0x56, 0x7c, 0xfa,
+	0xe2, 0x51, 0x66, 0xda, 0x95, 0x58, 0x13, 0xd4, 0xdf, 0xe6, 0x53, 0x7a, 0xc6, 0xe3, 0x1d, 0x71,
+	0x86, 0x5d, 0x8f, 0xa1, 0xba, 0x20, 0x5a, 0xd1, 0x0d, 0x5f, 0x2c, 0x76, 0xc3, 0x5f, 0x6c, 0x7c,
+	0x0e, 0xf1, 0x4f, 0x46, 0xa6, 0x5d, 0xfe, 0xa7, 0x08, 0xb5, 0x45, 0x29, 0xee, 0x89, 0x97, 0x4c,
+	0x37, 0xae, 0xdd, 0x62, 0xb4, 0xe7, 0x30, 0x4d, 0xfa, 0x3c, 0xf9, 0xe3, 0x5d, 0x23, 0xce, 0x69,
+	0x8f, 0xcd, 0x67, 0x7a, 0xac, 0x0e, 0x39, 0xc7, 0x67, 0xc3, 0x52, 0x6d, 0xff, 0x8b, 0xdb, 0x9e,
+	0x42, 0xf3, 0x8d, 0x9c, 0xe3, 0xd3, 0x3b, 0x8d, 0xdc, 0xe0, 0x8a, 0xd5, 0xfb, 0xb7, 0xb8, 0xd3,
+	0x97, 0x6e, 0x70, 0x65, 0x30, 0x24, 0x5a, 0x51, 0xad, 0x24, 0xbe, 0x08, 0x42, 0xe7, 0x8f, 0xfc,
+	0xf3, 0x8f, 0xb6, 0x70, 0xde, 0x32, 0xd0, 0x82, 0xe0, 0x34, 0x74, 0x69, 0x71, 0x8b, 0x83, 0xd7,
+	0x84, 0x2b, 0xf1, 0xce, 0xa1, 0x30, 0x06, 0x15, 0xb2, 0x99, 0x3c, 0x98, 0x90, 0x88, 0xf5, 0x8d,
+	0xcd, 0x66, 0x72, 0x6a, 0x66, 0x08, 0x73, 0x3c, 0x5e, 0x18, 0xd8, 0xf8, 0x37, 0xe3, 0xf1, 0x6d,
+	0xaf, 0xfa, 0x23, 0x8c, 0x6e, 0xcf, 0x41, 0xa6, 0x8f, 0x06, 0x23, 0xd8, 0x1a, 0xbc, 0xec, 0xb5,
+	0x4d, 0xad, 0xf3, 0x55, 0xeb, 0x44, 0x3b, 0x42, 0xef, 0xe1, 0x1a, 0x00, 0xe3, 0x1c, 0xb4, 0xfa,
+	0xda, 0x21, 0x92, 0x66, 0x1a, 0xad, 0x9e, 0x66, 0x3e, 0x6f, 0xbf, 0x44, 0x39, 0x7c, 0x07, 0x2a,
+	0x8c, 0xd3, 0x6d, 0x9d, 0x0e, 0x9e, 0xed, 0xa3, 0x7c, 0xe3, 0x53, 0xc8, 0x69, 0x3e, 0x35, 0xd4,
+	0x3a, 0x19, 0xa0, 0x2d, 0x50, 0xb4, 0x8e, 0xf9, 0xbb, 0xd3, 0xb6, 0x41, 0x6b, 0x64, 0x15, 0xca,
+	0x5a, 0xc7, 0x7c, 0xd6, 0x6e, 0x1d, 0xb5, 0x0d, 0x94, 0x6b, 0x7c, 0x03, 0x32, 0x0d, 0x30, 0x45,
+	0xff, 0xf2, 0xa4, 0x7b, 0x96, 0x31, 0xbb, 0x0b, 0x55, 0xce, 0x79, 0xd1, 0x3b, 0xd1, 0x0e, 0xb5,
+	0x01, 0x92, 0x66, 0xac, 0x5e, 0xab, 0xdf, 0x3f, 0xeb, 0x1a, 0x47, 0x28, 0x87, 0xb7, 0x01, 0x31,
+	0x56, 0xab, 0x47, 0xb5, 0x5a, 0x03, 0xad, 0xdb, 0x41, 0xf9, 0x39, 0xf7, 0xf0, 0xb0, 0xdd, 0xef,
+	0x9b, 0x87, 0xdd, 0xa3, 0x36, 0x92, 0x1b, 0xff, 0xca, 0xcd, 0xab, 0x4d, 0xe6, 0x5b, 0x18, 0xff,
+	0x49, 0xca, 0xfc, 0x8b, 0x17, 0xce, 0x05, 0xa2, 0xf4, 0x9c, 0xbe, 0xcd, 0x87, 0xf6, 0x2a, 0x1e,
+	0x0f, 0xee, 0xec, 0xef, 0xbd, 0x8c, 0xa4, 0xfe, 0x09, 0xa8, 0x2b, 0x0c, 0x58, 0xd4, 0x68, 0x0f,
+	0x64, 0x8f, 0x4e, 0xfc, 0x5d, 0xc3, 0x89, 0xfa, 0x77, 0xd2, 0x4a, 0x93, 0xef, 0x7b, 0x21, 0xaf,
+	0x17, 0x5f, 0xc8, 0x0f, 0x7e, 0xb7, 0xa5, 0x07, 0xf6, 0x17, 0x89, 0x8e, 0xd5, 0x2c, 0x57, 0x7a,
+	0xd9, 0x0b, 0x54, 0x36, 0xa9, 0x2f, 0xcc, 0x9e, 0xff, 0x70, 0xe7, 0x89, 0xcb, 0x7f, 0x0e, 0x30,
+	0x67, 0xae, 0xb8, 0xed, 0x76, 0xf6, 0xb6, 0xe5, 0xcc, 0xb1, 0x0e, 0x0e, 0x7f, 0xdf, 0x1a, 0x3b,
+	0xf1, 0x45, 0x72, 0xde, 0xb4, 0x03, 0x6f, 0x8f, 0x1e, 0xe4, 0x29, 0xb1, 0x83, 0x68, 0x1a, 0xc5,
+	0x44, 0x90, 0xe2, 0x5c, 0x7b, 0xdf, 0xff, 0xd7, 0xff, 0x79, 0x91, 0xc9, 0x3e, 0xfb, 0x6f, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0x29, 0x5a, 0xd3, 0x93, 0x1f, 0x18, 0x00, 0x00,
+}
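For orientation, here is a minimal sketch of how application code might construct and serialize one of the generated option messages above. It assumes the vendored import path from the file's `go_package` option and the `github.com/golang/protobuf/proto` runtime that the generated code targets; the package alias and the example values are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	swagger_options "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
)

func main() {
	// Build a Swagger options message using field names from the generated struct above.
	doc := &swagger_options.Swagger{
		Swagger:  "2.0",
		Host:     "api.example.com",
		BasePath: "/v1",
		Info: &swagger_options.Info{
			Title:   "Example API",
			Version: "1.0",
		},
	}

	// Serialize with the protobuf runtime; the generated marshal hooks above back this call.
	data, err := proto.Marshal(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded Swagger options: %d bytes\n", len(data))
}

In grpc-gateway these messages are normally populated from annotations in the source .proto files rather than built by hand; the direct construction here only illustrates the generated Go API.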
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto
new file mode 100644
index 0000000..83cb564
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto
@@ -0,0 +1,379 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_swagger.options;
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options";
+
+import "google/protobuf/any.proto";
+import "google/protobuf/struct.proto";
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// TODO(ivucica): document fields
+message Swagger {
+  string swagger = 1;
+  Info info = 2;
+  string host = 3;
+  // `base_path` is the common prefix path used on all API endpoints (i.e. /api, /v1, etc.). By adding this,
+  // it allows you to remove this portion from the path endpoints in your Swagger file making them easier
+  // to read. Note that using `base_path` does not change the endpoint paths that are generated in the resulting 
+  // Swagger file. If you wish to use `base_path` with relatively generated Swagger paths, the 
+  // `base_path` prefix must be manually removed from your `google.api.http` paths and your code changed to 
+  // serve the API from the `base_path`.
+  string base_path = 4;
+  enum SwaggerScheme {
+    UNKNOWN = 0;
+    HTTP = 1;
+    HTTPS = 2;
+    WS = 3;
+    WSS = 4;
+  }
+  repeated SwaggerScheme schemes = 5;
+  repeated string consumes = 6;
+  repeated string produces = 7;
+  // field 8 is reserved for 'paths'.
+  reserved 8;
+  // field 9 is reserved for 'definitions', which at this time are already
+  // exposed as and customizable as proto messages.
+  reserved 9;
+  map<string, Response> responses = 10;
+  SecurityDefinitions security_definitions = 11;
+  repeated SecurityRequirement security = 12;
+  // field 13 is reserved for 'tags', which are supposed to be exposed as and
+  // customizable as proto services. TODO(ivucica): add processing of proto
+  // service objects into OpenAPI v2 Tag objects.
+  reserved 13;
+  ExternalDocumentation external_docs = 14;
+  map<string, google.protobuf.Value> extensions = 15;
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// TODO(ivucica): document fields
+message Operation {
+  repeated string tags = 1;
+  string summary = 2;
+  string description = 3;
+  ExternalDocumentation external_docs = 4;
+  string operation_id = 5;
+  repeated string consumes = 6;
+  repeated string produces = 7;
+  // field 8 is reserved for 'parameters'.
+  reserved 8;
+  map<string, Response> responses = 9;
+  repeated string schemes = 10;
+  bool deprecated = 11;
+  repeated SecurityRequirement security = 12;
+  map<string, google.protobuf.Value> extensions = 13;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+message Response {
+  // `Description` is a short description of the response.
+  // GFM syntax can be used for rich text representation.
+  string description = 1;
+  // `Schema` optionally defines the structure of the response.
+  // If `Schema` is not provided, it means there is no content to the response.
+  Schema schema = 2;
+  // field 3 is reserved for 'headers'.
+  reserved 3;
+  // field 4 is reserved for 'example'.
+  reserved 4;
+  map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// TODO(ivucica): document fields
+message Info {
+  string title = 1;
+  string description = 2;
+  string terms_of_service = 3;
+  Contact contact = 4;
+  License license = 5;
+  string version = 6;
+  map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// TODO(ivucica): document fields
+message Contact {
+  string name = 1;
+  string url = 2;
+  string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+message License {
+  // Required. The license name used for the API.
+  string name = 1;
+  // A URL to the license used for the API.
+  string url = 2;
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// TODO(ivucica): document fields
+message ExternalDocumentation {
+  string description = 1;
+  string url = 2;
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// TODO(ivucica): document fields
+message Schema {
+  JSONSchema json_schema = 1;
+  string discriminator = 2;
+  bool read_only = 3;
+  // field 4 is reserved for 'xml'.
+  reserved 4;
+  ExternalDocumentation external_docs = 5;
+  google.protobuf.Any example = 6;
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// TODO(ivucica): document fields
+message JSONSchema {
+  // field 1 is reserved for '$id', omitted from OpenAPI v2.
+  reserved 1;
+  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+  reserved 2;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must be imported
+  // into the proto file. If no message is identified, the Ref will be used verbatim in
+  // the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 3;
+  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+  reserved 4;
+  string title = 5;
+  string description = 6;
+  string default = 7;
+  bool read_only = 8;
+  // field 9 is reserved for 'examples', which is omitted from OpenAPI v2 in favor of 'example' field.
+  reserved 9;
+  double multiple_of = 10;
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  uint64 max_length = 15;
+  uint64 min_length = 16;
+  string pattern = 17;
+  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+  reserved 18;
+  // field 19 is reserved for 'items', but in OpenAPI-specific way. TODO(ivucica): add 'items'?
+  reserved 19;
+  uint64 max_items = 20;
+  uint64 min_items = 21;
+  bool unique_items = 22;
+  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+  reserved 23;
+  uint64 max_properties = 24;
+  uint64 min_properties = 25;
+  repeated string required = 26;
+  // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific way. TODO(ivucica): add 'additionalProperties'?
+  reserved 27;
+  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+  reserved 28;
+  // field 29 is reserved for 'properties', but in OpenAPI-specific way. TODO(ivucica): add 'properties'?
+  reserved 29;
+  // following fields are reserved, as the properties have been omitted from OpenAPI v2:
+  // patternProperties, dependencies, propertyNames, const
+  reserved 30 to 33;
+  // Items in 'array' must be unique.
+  repeated string array = 34;
+
+  enum JSONSchemaSimpleTypes {
+    UNKNOWN = 0;
+    ARRAY = 1;
+    BOOLEAN = 2;
+    INTEGER = 3;
+    NULL = 4;
+    NUMBER = 5;
+    OBJECT = 6;
+    STRING = 7;
+  }
+
+  repeated JSONSchemaSimpleTypes type = 35;
+  // following fields are reserved, as the properties have been omitted from OpenAPI v2:
+  // format, contentMediaType, contentEncoding, if, then, else
+  reserved 36 to 41;
+  // field 42 is reserved for 'allOf', but in OpenAPI-specific way. TODO(ivucica): add 'allOf'?
+  reserved 42;
+  // following fields are reserved, as the properties have been omitted from OpenAPI v2:
+  // anyOf, oneOf, not
+  reserved 43 to 45;
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+// TODO(ivucica): document fields
+message Tag {
+  // field 1 is reserved for 'name'. In our generator, this is (to be) extracted
+  // from the name of proto service, and thus not exposed to the user, as
+  // changing tag object's name would break the link to the references to the
+  // tag in individual operation specifications.
+  //
+  // TODO(ivucica): Add 'name' property. Use it to allow override of the name of
+  // global Tag object, then use that name to reference the tag throughout the
+  // Swagger file.
+  reserved 1;
+  // TODO(ivucica): Description should be extracted from comments on the proto
+  // service object.
+  string description = 2;
+  ExternalDocumentation external_docs = 3;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+  // A single security scheme definition, mapping a "name" to the scheme it defines.
+  map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+  // Required. The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  enum Type {
+    TYPE_INVALID = 0;
+    TYPE_BASIC = 1;
+    TYPE_API_KEY = 2;
+    TYPE_OAUTH2 = 3;
+  }
+
+  // Required. The location of the API key. Valid values are "query" or "header".
+  enum In {
+    IN_INVALID = 0;
+    IN_QUERY = 1;
+    IN_HEADER = 2;
+  }
+
+  // Required. The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  enum Flow {
+    FLOW_INVALID = 0;
+    FLOW_IMPLICIT = 1;
+    FLOW_PASSWORD = 2;
+    FLOW_APPLICATION = 3;
+    FLOW_ACCESS_CODE = 4;
+  }
+
+  // Required. The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  Type type = 1;
+  // A short description for the security scheme.
+  string description = 2;
+  // Required. The name of the header or query parameter to be used.
+  //
+  // Valid for apiKey.
+  string name = 3;
+  // Required. The location of the API key. Valid values are "query" or "header".
+  //
+  // Valid for apiKey.
+  In in = 4;
+  // Required. The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  //
+  // Valid for oauth2.
+  Flow flow = 5;
+  // Required. The authorization URL to be used for this flow. This SHOULD be in
+  // the form of a URL.
+  //
+  // Valid for oauth2/implicit and oauth2/accessCode.
+  string authorization_url = 6;
+  // Required. The token URL to be used for this flow. This SHOULD be in the
+  // form of a URL.
+  //
+  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+  string token_url = 7;
+  // Required. The available scopes for the OAuth2 security scheme.
+  //
+  // Valid for oauth2.
+  Scopes scopes = 8;
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+  // If the security scheme is of type "oauth2", then the value is a list of
+  // scope names required for the execution. For other security scheme types,
+  // the array MUST be empty.
+  message SecurityRequirementValue {
+    repeated string scope = 1;
+  }
+  // Each name must correspond to a security scheme which is declared in
+  // the Security Definitions. If the security scheme is of type "oauth2",
+  // then the value is a list of scope names required for the execution.
+  // For other security scheme types, the array MUST be empty.
+  map<string, SecurityRequirementValue> security_requirement = 1;
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+  // Maps the name of a scope to a short description of it (as the value
+  // of the property).
+  map<string, string> scope = 1;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
new file mode 100644
index 0000000..819c45a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
@@ -0,0 +1,86 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "context.go",
+        "convert.go",
+        "doc.go",
+        "errors.go",
+        "fieldmask.go",
+        "handler.go",
+        "marshal_httpbodyproto.go",
+        "marshal_json.go",
+        "marshal_jsonpb.go",
+        "marshal_proto.go",
+        "marshaler.go",
+        "marshaler_registry.go",
+        "mux.go",
+        "pattern.go",
+        "proto2_convert.go",
+        "proto_errors.go",
+        "query.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
+    deps = [
+        "//internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//descriptor:go_default_library_gen",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@go_googleapis//google/api:httpbody_go_proto",
+        "@io_bazel_rules_go//proto/wkt:any_go_proto",
+        "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
+        "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+        "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//grpclog:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "context_test.go",
+        "convert_test.go",
+        "errors_test.go",
+        "fieldmask_test.go",
+        "handler_test.go",
+        "marshal_httpbodyproto_test.go",
+        "marshal_json_test.go",
+        "marshal_jsonpb_test.go",
+        "marshal_proto_test.go",
+        "marshaler_registry_test.go",
+        "mux_test.go",
+        "pattern_test.go",
+        "query_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//examples/proto/examplepb:go_default_library",
+        "//internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//ptypes:go_default_library_gen",
+        "@go_googleapis//google/api:httpbody_go_proto",
+        "@go_googleapis//google/rpc:errdetails_go_proto",
+        "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@io_bazel_rules_go//proto/wkt:struct_go_proto",
+        "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+        "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
new file mode 100644
index 0000000..f808382
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
@@ -0,0 +1,236 @@
+package runtime
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+var (
+	// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+	// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+	DefaultContextTimeout = 0 * time.Second
+)
+
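+// decodeBinHeader decodes a binary ("-Bin") metadata value; both the padded
+// form "aGVsbG8=" and the unpadded form "aGVsbG8" decode to []byte("hello").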
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// AnnotateIncomingContext adds context information such as metadata from the request.
+// Attach metadata as incoming context.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
+	var pairs []string
+	timeout := DefaultContextTimeout
+	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+		var err error
+		timeout, err = timeoutDecode(tm)
+		if err != nil {
+			return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+		}
+	}
+
+	for key, vals := range req.Header {
+		for _, val := range vals {
+			key = textproto.CanonicalMIMEHeaderKey(key)
+			// For backwards-compatibility, pass through 'authorization' header with no prefix.
+			if key == "Authorization" {
+				pairs = append(pairs, "authorization", val)
+			}
+			if h, ok := mux.incomingHeaderMatcher(key); ok {
+				// Handles "-bin" metadata in grpc, since grpc will do another base64
+				// encode before sending to server, we need to decode it first.
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		} else {
+			grpclog.Infof("invalid remote addr: %s", addr)
+		}
+	}
+
+	if timeout != 0 {
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return ctx, md, nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
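+// timeoutDecode parses a gRPC wire-format timeout value; for example "100m"
+// decodes to 100 * time.Millisecond and "5S" to 5 * time.Second.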
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 0000000..2c27934
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The byte sequence is expected to be encoded in standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val', where individual byte sequences encoded in standard or
+// URL-safe base64 are separated by 'sep', into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type cast into the
+// correct enum proto type.
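+// For example, with enumValMap {"UNKNOWN": 0, "STARTED": 1}, both "STARTED"
+// and "1" resolve to 1, while "2" returns an error.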
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+	return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around bytes[] type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrappers.BytesValue{Value: parsedVal}, err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 0000000..b6e5ddf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
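+
+A hand-written handler can reuse the same helpers that the generated code uses.
+A minimal sketch (pb.EchoClient and pb.EchoRequest are assumed, illustrative
+types, not part of this package):
+
+	func echoHandler(mux *runtime.ServeMux, client pb.EchoClient) http.HandlerFunc {
+		return func(w http.ResponseWriter, r *http.Request) {
+			ctx := r.Context()
+			marshaler := &runtime.JSONPb{}
+			// Copy inbound headers, timeout and forwarding metadata into the outgoing context.
+			rctx, err := runtime.AnnotateContext(ctx, mux, r)
+			if err != nil {
+				runtime.HTTPError(ctx, mux, marshaler, w, r, err)
+				return
+			}
+			resp, err := client.Echo(rctx, &pb.EchoRequest{})
+			if err != nil {
+				runtime.HTTPError(rctx, mux, marshaler, w, r, err)
+				return
+			}
+			// Write the proto response as JSON, forwarding any server metadata.
+			runtime.ForwardResponseMessage(rctx, mux, marshaler, w, r, resp)
+		}
+	}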
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
new file mode 100644
index 0000000..a360807
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
@@ -0,0 +1,146 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return http.StatusRequestTimeout
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+		return http.StatusBadRequest
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	}
+
+	grpclog.Infof("Unknown gRPC error code: %v", code)
+	return http.StatusInternalServerError
+}
+
+var (
+	// HTTPError replies to the request with the error.
+	// You can set a custom function to this variable to customize error format.
+	HTTPError = DefaultHTTPError
+	// OtherErrorHandler handles the following errors used by the gateway: StatusMethodNotAllowed, StatusNotFound and StatusBadRequest.
+	OtherErrorHandler = DefaultOtherErrorHandler
+)
+
+type errorBody struct {
+	Error string `protobuf:"bytes,100,name=error" json:"error"`
+	// This is to make the error more compatible with users that expect errors to be Status objects:
+	// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
+	// It should be the exact same message as the Error field.
+	Code    int32      `protobuf:"varint,1,name=code" json:"code"`
+	Message string     `protobuf:"bytes,2,name=message" json:"message"`
+	Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+// Make this also conform to proto.Message for builtin JSONPb Marshaler
+func (e *errorBody) Reset()         { *e = errorBody{} }
+func (e *errorBody) String() string { return proto.CompactTextString(e) }
+func (*errorBody) ProtoMessage()    {}
+
+// DefaultHTTPError is the default implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
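+//
+// For example, for a codes.NotFound error the HTTP status is 404 and the body
+// has the shape {"error": "...", "code": 5, "message": "..."}.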
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	const fallback = `{"error": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	body := &errorBody{
+		Error:   s.Message(),
+		Message: s.Message(),
+		Code:    int32(s.Code()),
+		Details: s.Proto().GetDetails(),
+	}
+
+	buf, merr := marshaler.Marshal(body)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+	http.Error(w, msg, code)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
new file mode 100644
index 0000000..341aad5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
@@ -0,0 +1,82 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	descriptor2 "github.com/golang/protobuf/descriptor"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/genproto/protobuf/field_mask"
+)
+
+func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
+	// TODO - should really gate this with a test that the marshaller has used json names
+	if md != nil {
+		for _, f := range md.Field {
+			if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
+				var subType *descriptor.DescriptorProto
+
+				// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
+				if f.TypeName != nil {
+					typeSplit := strings.Split(*f.TypeName, ".")
+					typeName := typeSplit[len(typeSplit)-1]
+					for _, t := range md.NestedType {
+						if typeName == *t.Name {
+							subType = t
+						}
+					}
+				}
+				return *f.Name, subType
+			}
+		}
+	}
+	return name, nil
+}
+
+// FieldMaskFromRequestBody creates a FieldMask listing all complete paths found in the JSON body.
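+// For example, with a nil descriptor a body of {"name": "x", "address": {"city": "y"}}
+// yields the paths "name" and "address.city", in unspecified order.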
+func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
+	fm := &field_mask.FieldMask{}
+	var root interface{}
+	if err := json.NewDecoder(r).Decode(&root); err != nil {
+		if err == io.EOF {
+			return fm, nil
+		}
+		return nil, err
+	}
+
+	queue := []fieldMaskPathItem{{node: root, md: md}}
+	for len(queue) > 0 {
+		// dequeue an item
+		item := queue[0]
+		queue = queue[1:]
+
+		if m, ok := item.node.(map[string]interface{}); ok {
+			// if the item is an object, then enqueue all of its children
+			for k, v := range m {
+				protoName, subMd := translateName(k, item.md)
+				if subMsg, ok := v.(descriptor2.Message); ok {
+					_, subMd = descriptor2.ForMessage(subMsg)
+				}
+				queue = append(queue, fieldMaskPathItem{path: append(item.path, protoName), node: v, md: subMd})
+			}
+		} else if len(item.path) > 0 {
+			// otherwise, it's a leaf node so print its path
+			fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
+		}
+	}
+
+	return fm, nil
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+	// the list of prior fields leading up to node
+	path []string
+
+	// a generic decoded json object; the current item to inspect for further path extraction
+	node interface{}
+
+	// descriptor for parent message
+	md *descriptor.DescriptorProto
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
new file mode 100644
index 0000000..2af9006
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
@@ -0,0 +1,209 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+
+	"context"
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/grpclog"
+)
+
+var errEmptyResponse = errors.New("empty response")
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+
+		buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler))
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if _, err = w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err = w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+		w.Header().Add("Trailer", tKey)
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+		for _, v := range vs {
+			w.Header().Add(tKey, v)
+		}
+	}
+}
+
+// responseBody is an interface with a method for getting the field to marshal to the response body.
+// The method is generated for the response struct from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+	serr := streamError(ctx, mux.streamErrorHandler, err)
+	if !wroteHeader {
+		w.WriteHeader(int(serr.HttpCode))
+	}
+	buf, merr := marshaler.Marshal(errorChunk(serr))
+	if merr != nil {
+		grpclog.Infof("Failed to marshal an error: %v", merr)
+		return
+	}
+	if _, werr := w.Write(buf); werr != nil {
+		grpclog.Infof("Failed to notify error to client: %v", werr)
+		return
+	}
+}
+
+// streamChunk returns a chunk in a response stream for the given result. The
+// given errHandler is used to render an error chunk if result is nil.
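+// With the JSON marshalers, a successful chunk is therefore written as
+// {"result": <message>} and an error chunk as {"error": <status>}.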
+func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message {
+	if result == nil {
+		return errorChunk(streamError(ctx, errHandler, errEmptyResponse))
+	}
+	return map[string]proto.Message{"result": result}
+}
+
+// streamError returns the payload for the final message in a response stream
+// that represents the given err.
+func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
+	serr := errHandler(ctx, err)
+	if serr != nil {
+		return serr
+	}
+	// TODO: log about misbehaving stream error handler?
+	return DefaultHTTPStreamErrorHandler(ctx, err)
+}
+
+func errorChunk(err *StreamError) map[string]proto.Message {
+	return map[string]proto.Message{"error": (*internal.StreamError)(err)}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
new file mode 100644
index 0000000..f55285b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,43 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler
+func SetHTTPBodyMarshaler(serveMux *ServeMux) {
+	serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{OrigName: true},
+	}
+}
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
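+// For example, an RPC that responds with
+// &httpbody.HttpBody{ContentType: "image/png", Data: data} is forwarded to the
+// HTTP client verbatim, with Content-Type set to "image/png".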
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType implementation to keep backwards compatibility with the Marshaler interface.
+func (h *HTTPBodyMarshaler) ContentType() string {
+	return h.ContentTypeFromMessage(nil)
+}
+
+// ContentTypeFromMessage returns the content type specified by v if v is a
+// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType()
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.Data, nil
+	}
+	return h.Marshaler.Marshal(v)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 0000000..f9d3a58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. maps, oneofs, etc.
+//
+// The NewEncoder and NewDecoder types return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000..f0de351
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb".
+// It supports fully functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Slice {
+		if rv.IsNil() {
+			if j.EmitDefaults {
+				return []byte("[]"), nil
+			}
+			return []byte("null"), nil
+		}
+
+		if rv.Type().Elem().Implements(protoMessageType) {
+			var buf bytes.Buffer
+			err := buf.WriteByte('[')
+			if err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					err = buf.WriteByte(',')
+					if err != nil {
+						return nil, err
+					}
+				}
+				if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+					return nil, err
+				}
+			}
+			err = buf.WriteByte(']')
+			if err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{Decoder: d}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error {
+		if err := j.marshalTo(w, v); err != nil {
+			return err
+		}
+		// mimic json.Encoder by adding a newline (makes output
+		// easier to read when it contains multiple encoded items)
+		_, err := w.Write(j.Delimiter())
+		return err
+	})
+}
+
+func unmarshalJSONPb(data []byte, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, v)
+}
+
+func decodeJSONPb(d *json.Decoder, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, v)
+	}
+	unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+	return unmarshaler.UnmarshalNext(d, p)
+}
+
+func decodeNonProtoField(d *json.Decoder, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
+
+// allowUnknownFields, when true, keeps the unmarshaler from returning an error
+// when the destination is a struct and the input contains object keys which do
+// not match any non-ignored, exported fields in the destination.
+var allowUnknownFields = true
+
+// DisallowUnknownFields configures the decoder (unmarshaler) to
+// return an error when it finds an unknown field. This function must be
+// called before using the JSON marshaler.
+func DisallowUnknownFields() {
+	allowUnknownFields = false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
new file mode 100644
index 0000000..f65d1a2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
@@ -0,0 +1,62 @@
+package runtime
+
+import (
+	"io"
+
+	"errors"
+	"github.com/golang/protobuf/proto"
+	"io/ioutil"
+)
+
+// ProtoMarshaller is a Marshaler which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType() string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 0000000..98fe6e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	ContentType() string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
new file mode 100644
index 0000000..5cc53ae
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
@@ -0,0 +1,91 @@
+package runtime
+
+import (
+	"errors"
+	"net/http"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &JSONPb{OrigName: true}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME types set by the
+// Content-Type (inbound) and Accept (outbound) headers. If a header carries
+// multiple values, the first one with an exact match in the registry is used.
+// If no inbound match is found (or the Content-Type header is empty), the "*"
+// wildcard marshaler is used; if no outbound match is found, the inbound
+// marshaler is reused.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
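+//
+// Illustrative sketch (not a prescribed setup): a mux that additionally serves
+// binary protobuf when a client asks for it could be built as
+//
+//	mux := NewServeMux(
+//		WithMarshalerOption("application/octet-stream", &ProtoMarshaller{}),
+//	)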
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
new file mode 100644
index 0000000..1da3a58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
@@ -0,0 +1,303 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
+// a request is received with a URI path that does not match any registered
+// service method.
+//
+// Since gRPC servers return an "Unimplemented" code for requests with an
+// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
+var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers                  map[string][]handler
+	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers                marshalerRegistry
+	incomingHeaderMatcher     HeaderMatcherFunc
+	outgoingHeaderMatcher     HeaderMatcherFunc
+	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
+	streamErrorHandler        StreamErrorHandlerFunc
+	protoErrorHandler         ProtoErrorHandlerFunc
+	disablePathLengthFallback bool
+	lastMatchWins             bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass HTTP request headers to the gRPC context. It adds permanent HTTP header
+// keys (as specified by the IANA) to the gRPC context with the grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+func DefaultHeaderMatcher(key string) (string, bool) {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// The matcher will be called with each header in the http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform the header before passing it to the gRPC context, the matcher should return the modified header.
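+//
+// Illustrative sketch, assuming a hypothetical "X-User-Id" header should be
+// forwarded in addition to the defaults:
+//
+//	matcher := func(key string) (string, bool) {
+//		if textproto.CanonicalMIMEHeaderKey(key) == "X-User-Id" {
+//			return key, true
+//		}
+//		return DefaultHeaderMatcher(key)
+//	}
+//	mux := NewServeMux(WithIncomingHeaderMatcher(matcher))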
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// The matcher will be called with each header in the response header metadata. If the matcher returns true, that header will be
+// passed to the HTTP response returned from the gateway. To transform the header before adding it to the response,
+// the matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from the http.Request and modify the gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as a general proto message defined by gRPC.
+// The response including body and status is not backward compatible with the default error handler.
+// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.protoErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithLastMatchWins returns a ServeMuxOption that will enable "last
+// match wins" behavior, where if multiple path patterns match a
+// request path, the last one defined in the .proto file will be used.
+func WithLastMatchWins() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.lastMatchWins = true
+	}
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+		streamErrorHandler:     DefaultHTTPStreamErrorHandler,
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.protoErrorHandler != nil {
+		HTTPError = serveMux.protoErrorHandler
+		// OtherErrorHandler is no longer used when protoErrorHandler is set.
+		// Overwritten by a special error handler to return Unknown.
+		OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
+			ctx := context.Background()
+			_, outboundMarshaler := MarshalerForRequest(serveMux, r)
+			sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
+			serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
+		}
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+		}
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	if s.lastMatchWins {
+		s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
+	} else {
+		s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
+	}
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		}
+		return
+	}
+
+	components := strings.Split(path[1:], "/")
+	l := len(components)
+	var verb string
+	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		}
+		return
+	} else if idx > 0 {
+		c := components[l-1]
+		components[l-1], verb = c[:idx], c[idx+1:]
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.InvalidArgument, err.Error())
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+			}
+			return
+		}
+	}
+	for _, h := range s.handlers[r.Method] {
+		pathParams, err := h.pat.Match(components, verb)
+		if err != nil {
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// Look up other methods to handle fallback from GET to POST and
+	// to determine whether it is MethodNotAllowed or NotFound.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			pathParams, err := h.pat.Match(components, verb)
+			if err != nil {
+				continue
+			}
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			if s.isPathLengthFallback(r) {
+				if err := r.ParseForm(); err != nil {
+					if s.protoErrorHandler != nil {
+						_, outboundMarshaler := MarshalerForRequest(s, r)
+						sterr := status.Error(codes.InvalidArgument, err.Error())
+						s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					} else {
+						OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+					}
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+			} else {
+				OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+			}
+			return
+		}
+	}
+
+	if s.protoErrorHandler != nil {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+	} else {
+		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+	}
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
new file mode 100644
index 0000000..0905369
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variable names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
+	verb string
+	// assumeColonVerb indicates whether a path suffix after a final
+	// colon may only be interpreted as a verb.
+	assumeColonVerb bool
+}
+
+type patternOptions struct {
+	assumeColonVerb bool
+}
+
+// PatternOpt is an option for creating Patterns.
+type PatternOpt func(*patternOptions)
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
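+//
+// Illustrative sketch: a pattern equivalent to "/v1/{name=*}" could be built as
+//
+//	p := MustPattern(NewPattern(1,
+//		[]int{
+//			int(utilities.OpLitPush), 0,
+//			int(utilities.OpPush), 0,
+//			int(utilities.OpConcatN), 1,
+//			int(utilities.OpCapture), 1,
+//		},
+//		[]string{"v1", "name"}, ""))
+//	// p.Match([]string{"v1", "books"}, "") then yields map[name:books].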
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
+	options := patternOptions{
+		assumeColonVerb: true,
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+
+	if version != 1 {
+		grpclog.Infof("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Infof("odd number of ops codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Infof("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("negative literal index: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Infof("negative concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Print("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Infof("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:             typedOps,
+		pool:            pool,
+		vars:            vars,
+		stacksize:       maxstack,
+		tailLen:         tailLen,
+		verb:            verb,
+		assumeColonVerb: options.assumeColonVerb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// Match examines components to determine whether they match the Pattern.
+// If they match, the function returns a mapping from field paths to their captured values.
+// Otherwise, the function returns an error.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	if p.verb != verb {
+		if p.assumeColonVerb || p.verb != "" {
+			return nil, ErrNotMatch
+		}
+		if len(components) == 0 {
+			components = []string{":" + verb}
+		} else {
+			components = append([]string{}, components...)
+			components[len(components)-1] += ":" + verb
+		}
+		verb = ""
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			stack = append(stack, strings.Join(components[pos:end], "/"))
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
+
+// AssumeColonVerbOpt indicates whether a path suffix after a final
+// colon may only be interpreted as a verb.
+func AssumeColonVerbOpt(val bool) PatternOpt {
+	return PatternOpt(func(o *patternOptions) {
+		o.assumeColonVerb = val
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
new file mode 100644
index 0000000..a3151e2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"github.com/golang/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 0000000..ca76324
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,106 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
+// a proto struct used to represent the error at the end of a stream.
+type StreamErrorHandlerFunc func(context.Context, error) *StreamError
+
+// StreamError is the payload for the final message in a server stream in the event that the server returns an
+// error after a response message has already been sent.
+type StreamError internal.StreamError
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is the default implementation of ProtoErrorHandlerFunc.
+// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	buf, merr := marshaler.Marshal(s.Proto())
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
+// default logic.
+//
+// It extracts the gRPC status from err if possible. The fields of the status are
+// used to populate the returned StreamError, and the HTTP status code is derived
+// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
+// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
+	grpcCode := codes.Unknown
+	grpcMessage := err.Error()
+	var grpcDetails []*any.Any
+	if s, ok := status.FromError(err); ok {
+		grpcCode = s.Code()
+		grpcMessage = s.Message()
+		grpcDetails = s.Proto().GetDetails()
+	}
+	httpCode := HTTPStatusFromCode(grpcCode)
+	return &StreamError{
+		GrpcCode:   int32(grpcCode),
+		HttpCode:   int32(httpCode),
+		Message:    grpcMessage,
+		HttpStatus: http.StatusText(httpCode),
+		Details:    grpcDetails,
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 0000000..ee0207e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,389 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
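+//
+// Illustrative sketch (the message and its fields are hypothetical): a query
+// string like "?name=alice&address.city=Paris" arrives as
+// url.Values{"name": {"alice"}, "address.city": {"Paris"}} and populates the
+// message's "name" field and the nested "address.city" field, instantiating
+// the intermediate "address" message if it is nil.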
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		match := valuesKeyRegexp.FindStringSubmatch(key)
+		if len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+// It instantiates missing protobuf fields as it goes.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg, fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
+	m := reflect.ValueOf(msg)
+	if m.Kind() != reflect.Ptr {
+		return fmt.Errorf("unexpected type %T: %v", msg, msg)
+	}
+	var props *proto.Properties
+	m = m.Elem()
+	for i, fieldName := range fieldPath {
+		isLast := i == len(fieldPath)-1
+		if !isLast && m.Kind() != reflect.Struct {
+			return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
+		}
+		var f reflect.Value
+		var err error
+		f, props, err = fieldByProtoName(m, fieldName)
+		if err != nil {
+			return err
+		} else if !f.IsValid() {
+			grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
+			return nil
+		}
+
+		switch f.Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			m = f
+		case reflect.Slice:
+			if !isLast {
+				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
+			}
+			// Handle []byte
+			if f.Type().Elem().Kind() == reflect.Uint8 {
+				m = f
+				break
+			}
+			return populateRepeatedField(f, values, props)
+		case reflect.Ptr:
+			if f.IsNil() {
+				m = reflect.New(f.Type().Elem())
+				f.Set(m.Convert(f.Type()))
+			}
+			m = f.Elem()
+			continue
+		case reflect.Struct:
+			m = f
+			continue
+		case reflect.Map:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			return populateMapField(f, values, props)
+		default:
+			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
+		}
+	}
+	switch len(values) {
+	case 0:
+		return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
+	case 1:
+	default:
+		grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
+	}
+	return populateField(m, values[0], props)
+}
+
+// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
+// "m" must be a struct value. It returns zero reflect.Value if no such field found.
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+	props := proto.GetProperties(m.Type())
+
+	// look up field name in oneof map
+	if op, ok := props.OneofTypes[name]; ok {
+		v := reflect.New(op.Type.Elem())
+		field := m.Field(op.Field)
+		if !field.IsNil() {
+			return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+		}
+		field.Set(v)
+		return v.Elem().Field(0), op.Prop, nil
+	}
+
+	for _, p := range props.Prop {
+		if p.OrigName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+		if p.JSONName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+	}
+	return reflect.Value{}, nil, nil
+}
+
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
+	}
+
+	key, value := values[0], values[1]
+	keyType := f.Type().Key()
+	valueType := f.Type().Elem()
+	if f.IsNil() {
+		f.Set(reflect.MakeMap(f.Type()))
+	}
+
+	keyConv, ok := convFromType[keyType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+	}
+	valueConv, ok := convFromType[valueType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+	}
+
+	keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+	if err := keyV[1].Interface(); err != nil {
+		return err.(error)
+	}
+	valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := valueV[1].Interface(); err != nil {
+		return err.(error)
+	}
+
+	f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+	return nil
+}
+
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+	elemType := f.Type().Elem()
+
+	// is the destination field a slice of an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnumRepeated(f, values, enumValMap)
+	}
+
+	conv, ok := convFromType[elemType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported field type %s", elemType)
+	}
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+		if err := result[1].Interface(); err != nil {
+			return err.(error)
+		}
+		f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+	}
+	return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+	i := f.Addr().Interface()
+
+	// Handle protobuf well known types
+	var name string
+	switch m := i.(type) {
+	case interface{ XXX_WellKnownType() string }:
+		name = m.XXX_WellKnownType()
+	case proto.Message:
+		const wktPrefix = "google.protobuf."
+		if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
+			name = fullName[len(wktPrefix):]
+		}
+	}
+	switch name {
+	case "Timestamp":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		f.FieldByName("Seconds").SetInt(int64(t.Unix()))
+		f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
+		return nil
+	case "Duration":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		ns := d.Nanoseconds()
+		s := ns / 1e9
+		ns %= 1e9
+		f.FieldByName("Seconds").SetInt(s)
+		f.FieldByName("Nanos").SetInt(ns)
+		return nil
+	case "DoubleValue":
+		fallthrough
+	case "FloatValue":
+		float64Val, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return fmt.Errorf("bad DoubleValue: %s", value)
+		}
+		f.FieldByName("Value").SetFloat(float64Val)
+		return nil
+	case "Int64Value":
+		fallthrough
+	case "Int32Value":
+		int64Val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad DoubleValue: %s", value)
+		}
+		f.FieldByName("Value").SetInt(int64Val)
+		return nil
+	case "UInt64Value":
+		fallthrough
+	case "UInt32Value":
+		uint64Val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad DoubleValue: %s", value)
+		}
+		f.FieldByName("Value").SetUint(uint64Val)
+		return nil
+	case "BoolValue":
+		if value == "true" {
+			f.FieldByName("Value").SetBool(true)
+		} else if value == "false" {
+			f.FieldByName("Value").SetBool(false)
+		} else {
+			return fmt.Errorf("bad BoolValue: %s", value)
+		}
+		return nil
+	case "StringValue":
+		f.FieldByName("Value").SetString(value)
+		return nil
+	case "BytesValue":
+		bytesVal, err := base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			return fmt.Errorf("bad BytesValue: %s", value)
+		}
+		f.FieldByName("Value").SetBytes(bytesVal)
+		return nil
+	case "FieldMask":
+		p := f.FieldByName("Paths")
+		for _, v := range strings.Split(value, ",") {
+			if v != "" {
+				p.Set(reflect.Append(p, reflect.ValueOf(v)))
+			}
+		}
+		return nil
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnum(f, value, enumValMap)
+	}
+
+	conv, ok := convFromType[f.Kind()]
+	if !ok {
+		return fmt.Errorf("field type %T is not supported in query parameters", i)
+	}
+	result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := result[1].Interface(); err != nil {
+		return err.(error)
+	}
+	f.Set(result[0].Convert(f.Type()))
+	return nil
+}
+
+func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
+	// see if it's an enumeration string
+	if enumVal, ok := enumValMap[value]; ok {
+		return reflect.ValueOf(enumVal).Convert(t), nil
+	}
+
+	// check for an integer that matches an enumeration value
+	eVal, err := strconv.Atoi(value)
+	if err != nil {
+		return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+	}
+	for _, v := range enumValMap {
+		if v == int32(eVal) {
+			return reflect.ValueOf(eVal).Convert(t), nil
+		}
+	}
+	return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+}
+
+func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
+	cval, err := convertEnum(value, f.Type(), enumValMap)
+	if err != nil {
+		return err
+	}
+	f.Set(cval)
+	return nil
+}
+
+func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
+	elemType := f.Type().Elem()
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result, err := convertEnum(v, elemType, enumValMap)
+		if err != nil {
+			return err
+		}
+		f.Index(i).Set(result)
+	}
+	return nil
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
new file mode 100644
index 0000000..7109d79
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "pattern.go",
+        "readerfactory.go",
+        "trie.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = ["trie_test.go"],
+    embed = [":go_default_library"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
new file mode 100644
index 0000000..cf79a4d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 0000000..dfe7de4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to the stack
+	OpPush
+	// OpLitPush pushes a component to the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes them to the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them and pushes the result back to the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 0000000..6dd3854
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
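+//
+// Illustrative sketch, where body is any io.Reader:
+//
+//	newReader, err := IOReaderFactory(body)
+//	if err != nil {
+//		return err
+//	}
+//	first := newReader()  // reads the buffered bytes from the beginning
+//	second := newReader() // an independent reader over the same bytes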
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 0000000..c2b7b30
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie over sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := int(-1)
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	if k < len(sj) {
+		return true
+	}
+	return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
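+//
+// Illustrative sketch:
+//
+//	da := NewDoubleArray([][]string{{"foo", "bar"}})
+//	da.HasCommonPrefix([]string{"foo", "bar", "baz"}) // true: {"foo", "bar"} is a prefix
+//	da.HasCommonPrefix([]string{"foo"})               // false: no registered sequence is a prefix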
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+	if len(da.Base) == 0 {
+		return false
+	}
+
+	var i int
+	for _, t := range seq {
+		code, ok := da.Encoding[t]
+		if !ok {
+			break
+		}
+		j := da.Base[i] + code
+		if len(da.Check) <= j || da.Check[j] != i+1 {
+			break
+		}
+		i = j
+	}
+	j := da.Base[i] + len(da.Encoding)
+	if len(da.Check) <= j || da.Check[j] != i+1 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml
new file mode 100644
index 0000000..0f0728d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/.travis.yml
@@ -0,0 +1,44 @@
+language: go
+
+os:
+  - linux
+  - osx
+
+go:
+  - 1.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+
+install:
+  # go-flags
+  - go get -d -v ./...
+  - go build -v ./...
+
+  # linting
+  - go get github.com/golang/lint/golint
+
+  # code coverage
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/ginkgo/ginkgo
+  - go get github.com/modocache/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
+
+script:
+  # go-flags
+  - $(exit $(gofmt -l . | wc -l))
+  - go test -v ./...
+
+  # linting
+  - go tool vet -all=true -v=true . || true
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
+
+  # code coverage
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
+
+env:
+  # coveralls.io
+  secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="
diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE
new file mode 100644
index 0000000..bcca0d5
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012 Jesse van den Kieboom. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+     copyright notice, this list of conditions and the following disclaimer
+     in the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Google Inc. nor the names of its
+     contributors may be used to endorse or promote products derived from
+     this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md
new file mode 100644
index 0000000..3b02394
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/README.md
@@ -0,0 +1,134 @@
+go-flags: a go library for parsing command line arguments
+=========================================================
+
+[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master)
+
+This library provides functionality similar to the built-in flag package of
+Go, but with many more features and nicer formatting. From the
+documentation:
+
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go builtin flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+Supported features:
+* Options with short names (-v)
+* Options with long names (--verbose)
+* Options with and without arguments (bool vs. other types)
+* Options with optional arguments and default values
+* Multiple option groups each containing a set of options
+* Generate and print well-formatted help message
+* Passing remaining command line arguments after -- (optional)
+* Ignoring unknown command line options (optional)
+* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+* Supports multiple short options -aux
+* Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+* Supports same option multiple times (can store in slice or last option counts)
+* Supports maps
+* Supports function callbacks
+* Supports namespaces for (nested) option groups
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+```go
+type Options struct {
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+}
+```
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. For example, when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Example:
+--------
+```go
+var opts struct {
+	// Slice of bool will append 'true' each time the option
+	// is encountered (can be set multiple times, like -vvv)
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+
+	// Example of automatic marshalling to desired type (uint)
+	Offset uint `long:"offset" description:"Offset"`
+
+	// Example of a callback, called each time the option is found.
+	Call func(string) `short:"c" description:"Call phone number"`
+
+	// Example of a required flag
+	Name string `short:"n" long:"name" description:"A name" required:"true"`
+
+	// Example of a value name
+	File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
+
+	// Example of a pointer
+	Ptr *int `short:"p" description:"A pointer to an integer"`
+
+	// Example of a slice of strings
+	StringSlice []string `short:"s" description:"A slice of strings"`
+
+	// Example of a slice of pointers
+	PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+
+	// Example of a map
+	IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+}
+
+// Callback which will invoke callto:<argument> to call a number.
+// Note that this works only on OS X (and probably only with
+// Skype) but it shows the idea.
+opts.Call = func(num string) {
+	cmd := exec.Command("open", "callto:"+num)
+	cmd.Start()
+	cmd.Process.Release()
+}
+
+// Make some fake arguments to parse.
+args := []string{
+	"-vv",
+	"--offset=5",
+	"-n", "Me",
+	"-p", "3",
+	"-s", "hello",
+	"-s", "world",
+	"--ptrslice", "hello",
+	"--ptrslice", "world",
+	"--intmap", "a:1",
+	"--intmap", "b:5",
+	"arg1",
+	"arg2",
+	"arg3",
+}
+
+// Parse flags from `args'. Note that here we use flags.ParseArgs for
+// the sake of making a working example. Normally, you would simply use
+// flags.Parse(&opts) which uses os.Args
+args, err := flags.ParseArgs(&opts, args)
+
+if err != nil {
+	panic(err)
+}
+
+fmt.Printf("Verbosity: %v\n", opts.Verbose)
+fmt.Printf("Offset: %d\n", opts.Offset)
+fmt.Printf("Name: %s\n", opts.Name)
+fmt.Printf("Ptr: %d\n", *opts.Ptr)
+fmt.Printf("StringSlice: %v\n", opts.StringSlice)
+fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
+fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"])
+fmt.Printf("Remaining args: %s\n", strings.Join(args, " "))
+
+// Output: Verbosity: [true true]
+// Offset: 5
+// Name: Me
+// Ptr: 3
+// StringSlice: [hello world]
+// PtrSlice: [hello world]
+// IntMap: [a:1 b:5]
+// Remaining args: arg1 arg2 arg3
+```
+
+More information can be found in the godocs: <http://godoc.org/github.com/jessevdk/go-flags>
diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go
new file mode 100644
index 0000000..8ec6204
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/arg.go
@@ -0,0 +1,27 @@
+package flags
+
+import (
+	"reflect"
+)
+
+// Arg represents a positional argument on the command line.
+type Arg struct {
+	// The name of the positional argument (used in the help)
+	Name string
+
+	// A description of the positional argument (used in the help)
+	Description string
+
+	// The minimal number of required positional arguments
+	Required int
+
+	// The maximum number of required positional arguments
+	RequiredMaximum int
+
+	value reflect.Value
+	tag   multiTag
+}
+
+func (a *Arg) isRemaining() bool {
+	return a.value.Type().Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
new file mode 100644
index 0000000..c494f61
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+echo '# linux arm7'
+GOARM=7 GOARCH=arm GOOS=linux go build
+echo '# linux arm5'
+GOARM=5 GOARCH=arm GOOS=linux go build
+echo '# windows 386'
+GOARCH=386 GOOS=windows go build
+echo '# windows amd64'
+GOARCH=amd64 GOOS=windows go build
+echo '# darwin'
+GOARCH=amd64 GOOS=darwin go build
+echo '# freebsd'
+GOARCH=amd64 GOOS=freebsd go build
diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go
new file mode 100644
index 0000000..3b51875
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/closest.go
@@ -0,0 +1,59 @@
+package flags
+
+func levenshtein(s string, t string) int {
+	if len(s) == 0 {
+		return len(t)
+	}
+
+	if len(t) == 0 {
+		return len(s)
+	}
+
+	dists := make([][]int, len(s)+1)
+	for i := range dists {
+		dists[i] = make([]int, len(t)+1)
+		dists[i][0] = i
+	}
+
+	for j := range t {
+		dists[0][j] = j
+	}
+
+	for i, sc := range s {
+		for j, tc := range t {
+			if sc == tc {
+				dists[i+1][j+1] = dists[i][j]
+			} else {
+				dists[i+1][j+1] = dists[i][j] + 1
+				if dists[i+1][j] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i+1][j] + 1
+				}
+				if dists[i][j+1] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i][j+1] + 1
+				}
+			}
+		}
+	}
+
+	return dists[len(s)][len(t)]
+}
+
+func closestChoice(cmd string, choices []string) (string, int) {
+	if len(choices) == 0 {
+		return "", 0
+	}
+
+	mincmd := -1
+	mindist := -1
+
+	for i, c := range choices {
+		l := levenshtein(cmd, c)
+
+		if mincmd < 0 || l < mindist {
+			mindist = l
+			mincmd = i
+		}
+	}
+
+	return choices[mincmd], mindist
+}
diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go
new file mode 100644
index 0000000..486bacb
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/command.go
@@ -0,0 +1,465 @@
+package flags
+
+import (
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Command represents an application command. Commands can be added to the
+// parser (which itself is a command) and are selected/executed when their name
+// is specified on the command line. The Command type embeds a Group and
+// therefore also carries a set of command-specific options.
+type Command struct {
+	// Embedded, see Group for more information
+	*Group
+
+	// The name by which the command can be invoked
+	Name string
+
+	// The active sub command (set by parsing) or nil
+	Active *Command
+
+	// Whether subcommands are optional
+	SubcommandsOptional bool
+
+	// Aliases for the command
+	Aliases []string
+
+	// Whether positional arguments are required
+	ArgsRequired bool
+
+	commands            []*Command
+	hasBuiltinHelpGroup bool
+	args                []*Arg
+}
+
+// Commander is an interface which can be implemented by any command added in
+// the options. When implemented, the Execute method will be called for the last
+// specified (sub)command providing the remaining command line arguments.
+type Commander interface {
+	// Execute will be called for the last active (sub)command. The
+	// args argument contains the remaining command line arguments. The
+	// error that Execute returns will be eventually passed out of the
+	// Parse method of the Parser.
+	Execute(args []string) error
+}
+
+// Usage is an interface which can be implemented to show a custom usage string
+// in the help message shown for a command.
+type Usage interface {
+	// Usage is called for commands to allow customized printing of command
+	// usage in the generated help message.
+	Usage() string
+}
+
+type lookup struct {
+	shortNames map[string]*Option
+	longNames  map[string]*Option
+
+	commands map[string]*Command
+}
+
+// AddCommand adds a new command to the parser with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the command. The provided data can implement the Commander
+// and Usage interfaces.
+func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) {
+	cmd := newCommand(command, shortDescription, longDescription, data)
+
+	cmd.parent = c
+
+	if err := cmd.scan(); err != nil {
+		return nil, err
+	}
+
+	c.commands = append(c.commands, cmd)
+	return cmd, nil
+}
+
+// AddGroup adds a new group to the command with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = c
+
+	if err := group.scanType(c.scanSubcommandHandler(group)); err != nil {
+		return nil, err
+	}
+
+	c.groups = append(c.groups, group)
+	return group, nil
+}
+
+// Commands returns a list of subcommands of this command.
+func (c *Command) Commands() []*Command {
+	return c.commands
+}
+
+// Find locates the subcommand with the given name and returns it. If no such
+// command can be found Find will return nil.
+func (c *Command) Find(name string) *Command {
+	for _, cc := range c.commands {
+		if cc.match(name) {
+			return cc
+		}
+	}
+
+	return nil
+}
+
+// FindOptionByLongName finds an option that is part of the command, or any of
+// its parent commands, by matching its long name (including the option
+// namespace).
+func (c *Command) FindOptionByLongName(longName string) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByLongName(longName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// FindOptionByShortName finds an option that is part of the command, or any of
+// its parent commands, by matching its short name.
+func (c *Command) FindOptionByShortName(shortName rune) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByShortName(shortName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// Args returns a list of positional arguments associated with this command.
+func (c *Command) Args() []*Arg {
+	ret := make([]*Arg, len(c.args))
+	copy(ret, c.args)
+
+	return ret
+}
+
+func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command {
+	return &Command{
+		Group: newGroup(shortDescription, longDescription, data),
+		Name:  name,
+	}
+}
+
+func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler {
+	f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+		mtag := newMultiTag(string(sfield.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return true, err
+		}
+
+		positional := mtag.Get("positional-args")
+
+		if len(positional) != 0 {
+			stype := realval.Type()
+
+			for i := 0; i < stype.NumField(); i++ {
+				field := stype.Field(i)
+
+				m := newMultiTag((string(field.Tag)))
+
+				if err := m.Parse(); err != nil {
+					return true, err
+				}
+
+				name := m.Get("positional-arg-name")
+
+				if len(name) == 0 {
+					name = field.Name
+				}
+
+				required := -1
+				requiredMaximum := -1
+
+				sreq := m.Get("required")
+
+				if sreq != "" {
+					required = 1
+
+					rng := strings.SplitN(sreq, "-", 2)
+
+					if len(rng) > 1 {
+						if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil {
+							required = int(preq)
+						}
+
+						if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil {
+							requiredMaximum = int(preq)
+						}
+					} else {
+						if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil {
+							required = int(preq)
+						}
+					}
+				}
+
+				arg := &Arg{
+					Name:            name,
+					Description:     m.Get("description"),
+					Required:        required,
+					RequiredMaximum: requiredMaximum,
+
+					value: realval.Field(i),
+					tag:   m,
+				}
+
+				c.args = append(c.args, arg)
+
+				if len(mtag.Get("required")) != 0 {
+					c.ArgsRequired = true
+				}
+			}
+
+			return true, nil
+		}
+
+		subcommand := mtag.Get("command")
+
+		if len(subcommand) != 0 {
+			var ptrval reflect.Value
+
+			if realval.Kind() == reflect.Ptr {
+				ptrval = realval
+
+				if ptrval.IsNil() {
+					ptrval.Set(reflect.New(ptrval.Type().Elem()))
+				}
+			} else {
+				ptrval = realval.Addr()
+			}
+
+			shortDescription := mtag.Get("description")
+			longDescription := mtag.Get("long-description")
+			subcommandsOptional := mtag.Get("subcommands-optional")
+			aliases := mtag.GetMany("alias")
+
+			subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface())
+
+			if err != nil {
+				return true, err
+			}
+
+			subc.Hidden = mtag.Get("hidden") != ""
+
+			if len(subcommandsOptional) > 0 {
+				subc.SubcommandsOptional = true
+			}
+
+			if len(aliases) > 0 {
+				subc.Aliases = aliases
+			}
+
+			return true, nil
+		}
+
+		return parentg.scanSubGroupHandler(realval, sfield)
+	}
+
+	return f
+}
+
+func (c *Command) scan() error {
+	return c.scanType(c.scanSubcommandHandler(c.Group))
+}
+
+func (c *Command) eachOption(f func(*Command, *Group, *Option)) {
+	c.eachCommand(func(c *Command) {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				f(c, g, option)
+			}
+		})
+	}, true)
+}
+
+func (c *Command) eachCommand(f func(*Command), recurse bool) {
+	f(c)
+
+	for _, cc := range c.commands {
+		if recurse {
+			cc.eachCommand(f, true)
+		} else {
+			f(cc)
+		}
+	}
+}
+
+func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) {
+	c.eachGroup(func(g *Group) {
+		f(c, g)
+	})
+
+	if c.Active != nil {
+		c.Active.eachActiveGroup(f)
+	}
+}
+
+func (c *Command) addHelpGroups(showHelp func() error) {
+	if !c.hasBuiltinHelpGroup {
+		c.addHelpGroup(showHelp)
+		c.hasBuiltinHelpGroup = true
+	}
+
+	for _, cc := range c.commands {
+		cc.addHelpGroups(showHelp)
+	}
+}
+
+func (c *Command) makeLookup() lookup {
+	ret := lookup{
+		shortNames: make(map[string]*Option),
+		longNames:  make(map[string]*Option),
+		commands:   make(map[string]*Command),
+	}
+
+	parent := c.parent
+
+	var parents []*Command
+
+	for parent != nil {
+		if cmd, ok := parent.(*Command); ok {
+			parents = append(parents, cmd)
+			parent = cmd.parent
+		} else {
+			parent = nil
+		}
+	}
+
+	for i := len(parents) - 1; i >= 0; i-- {
+		parents[i].fillLookup(&ret, true)
+	}
+
+	c.fillLookup(&ret, false)
+	return ret
+}
+
+func (c *Command) fillLookup(ret *lookup, onlyOptions bool) {
+	c.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.ShortName != 0 {
+				ret.shortNames[string(option.ShortName)] = option
+			}
+
+			if len(option.LongName) > 0 {
+				ret.longNames[option.LongNameWithNamespace()] = option
+			}
+		}
+	})
+
+	if onlyOptions {
+		return
+	}
+
+	for _, subcommand := range c.commands {
+		ret.commands[subcommand.Name] = subcommand
+
+		for _, a := range subcommand.Aliases {
+			ret.commands[a] = subcommand
+		}
+	}
+}
+
+func (c *Command) groupByName(name string) *Group {
+	if grp := c.Group.groupByName(name); grp != nil {
+		return grp
+	}
+
+	for _, subc := range c.commands {
+		prefix := subc.Name + "."
+
+		if strings.HasPrefix(name, prefix) {
+			if grp := subc.groupByName(name[len(prefix):]); grp != nil {
+				return grp
+			}
+		} else if name == subc.Name {
+			return subc.Group
+		}
+	}
+
+	return nil
+}
+
+type commandList []*Command
+
+func (c commandList) Less(i, j int) bool {
+	return c[i].Name < c[j].Name
+}
+
+func (c commandList) Len() int {
+	return len(c)
+}
+
+func (c commandList) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+func (c *Command) sortedVisibleCommands() []*Command {
+	ret := commandList(c.visibleCommands())
+	sort.Sort(ret)
+
+	return []*Command(ret)
+}
+
+func (c *Command) visibleCommands() []*Command {
+	ret := make([]*Command, 0, len(c.commands))
+
+	for _, cmd := range c.commands {
+		if !cmd.Hidden {
+			ret = append(ret, cmd)
+		}
+	}
+
+	return ret
+}
+
+func (c *Command) match(name string) bool {
+	if c.Name == name {
+		return true
+	}
+
+	for _, v := range c.Aliases {
+		if v == name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (c *Command) hasCliOptions() bool {
+	ret := false
+
+	c.eachGroup(func(g *Group) {
+		if g.isBuiltinHelp {
+			return
+		}
+
+		for _, opt := range g.options {
+			if opt.canCli() {
+				ret = true
+			}
+		}
+	})
+
+	return ret
+}
+
+func (c *Command) fillParseState(s *parseState) {
+	s.positional = make([]*Arg, len(c.args))
+	copy(s.positional, c.args)
+
+	s.lookup = c.makeLookup()
+	s.command = c
+}
diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go
new file mode 100644
index 0000000..7a7a08b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/completion.go
@@ -0,0 +1,309 @@
+package flags
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Completion is a type containing information about a completion.
+type Completion struct {
+	// The completed item
+	Item string
+
+	// A description of the completed item (optional)
+	Description string
+}
+
+type completions []Completion
+
+func (c completions) Len() int {
+	return len(c)
+}
+
+func (c completions) Less(i, j int) bool {
+	return c[i].Item < c[j].Item
+}
+
+func (c completions) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+// Completer is an interface which can be implemented by types
+// to provide custom command line argument completion.
+type Completer interface {
+	// Complete receives a prefix representing a (partial) value
+	// for its type and should provide a list of possible valid
+	// completions.
+	Complete(match string) []Completion
+}
+
+type completion struct {
+	parser *Parser
+}
+
+// Filename is a string alias which provides filename completion.
+type Filename string
+
+func completionsWithoutDescriptions(items []string) []Completion {
+	ret := make([]Completion, len(items))
+
+	for i, v := range items {
+		ret[i].Item = v
+	}
+
+	return ret
+}
+
+// Complete returns a list of existing files with the given
+// prefix.
+func (f *Filename) Complete(match string) []Completion {
+	ret, _ := filepath.Glob(match + "*")
+	return completionsWithoutDescriptions(ret)
+}
+
+func (c *completion) skipPositional(s *parseState, n int) {
+	if n >= len(s.positional) {
+		s.positional = nil
+	} else {
+		s.positional = s.positional[n:]
+	}
+}
+
+func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion {
+	if short && len(match) != 0 {
+		return []Completion{
+			Completion{
+				Item: prefix + match,
+			},
+		}
+	}
+
+	var results []Completion
+	repeats := map[string]bool{}
+
+	for name, opt := range s.lookup.longNames {
+		if strings.HasPrefix(name, match) && !opt.Hidden {
+			results = append(results, Completion{
+				Item:        defaultLongOptDelimiter + name,
+				Description: opt.Description,
+			})
+
+			if short {
+				repeats[string(opt.ShortName)] = true
+			}
+		}
+	}
+
+	if short {
+		for name, opt := range s.lookup.shortNames {
+			if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden {
+				results = append(results, Completion{
+					Item:        string(defaultShortOptDelimiter) + name,
+					Description: opt.Description,
+				})
+			}
+		}
+	}
+
+	return results
+}
+
+func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, false)
+}
+
+func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, true)
+}
+
+func (c *completion) completeCommands(s *parseState, match string) []Completion {
+	n := make([]Completion, 0, len(s.command.commands))
+
+	for _, cmd := range s.command.commands {
+		if cmd.data != c && strings.HasPrefix(cmd.Name, match) {
+			n = append(n, Completion{
+				Item:        cmd.Name,
+				Description: cmd.ShortDescription,
+			})
+		}
+	}
+
+	return n
+}
+
+func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion {
+	if value.Kind() == reflect.Slice {
+		value = reflect.New(value.Type().Elem())
+	}
+	i := value.Interface()
+
+	var ret []Completion
+
+	if cmp, ok := i.(Completer); ok {
+		ret = cmp.Complete(match)
+	} else if value.CanAddr() {
+		if cmp, ok = value.Addr().Interface().(Completer); ok {
+			ret = cmp.Complete(match)
+		}
+	}
+
+	for i, v := range ret {
+		ret[i].Item = prefix + v.Item
+	}
+
+	return ret
+}
+
+func (c *completion) complete(args []string) []Completion {
+	if len(args) == 0 {
+		args = []string{""}
+	}
+
+	s := &parseState{
+		args: args,
+	}
+
+	c.parser.fillParseState(s)
+
+	var opt *Option
+
+	for len(s.args) > 1 {
+		arg := s.pop()
+
+		if (c.parser.Options&PassDoubleDash) != None && arg == "--" {
+			opt = nil
+			c.skipPositional(s, len(s.args)-1)
+
+			break
+		}
+
+		if argumentIsOption(arg) {
+			prefix, optname, islong := stripOptionPrefix(arg)
+			optname, _, argument := splitOption(prefix, optname, islong)
+
+			if argument == nil {
+				var o *Option
+				canarg := true
+
+				if islong {
+					o = s.lookup.longNames[optname]
+				} else {
+					for i, r := range optname {
+						sname := string(r)
+						o = s.lookup.shortNames[sname]
+
+						if o == nil {
+							break
+						}
+
+						if i == 0 && o.canArgument() && len(optname) != len(sname) {
+							canarg = false
+							break
+						}
+					}
+				}
+
+				if o == nil && (c.parser.Options&PassAfterNonOption) != None {
+					opt = nil
+					c.skipPositional(s, len(s.args)-1)
+
+					break
+				} else if o != nil && o.canArgument() && !o.OptionalArgument && canarg {
+					if len(s.args) > 1 {
+						s.pop()
+					} else {
+						opt = o
+					}
+				}
+			}
+		} else {
+			if len(s.positional) > 0 {
+				if !s.positional[0].isRemaining() {
+					// Don't advance beyond a remaining positional arg (because
+					// it consumes all subsequent args).
+					s.positional = s.positional[1:]
+				}
+			} else if cmd, ok := s.lookup.commands[arg]; ok {
+				cmd.fillParseState(s)
+			}
+
+			opt = nil
+		}
+	}
+
+	lastarg := s.args[len(s.args)-1]
+	var ret []Completion
+
+	if opt != nil {
+		// Completion for the argument of 'opt'
+		ret = c.completeValue(opt.value, "", lastarg)
+	} else if argumentStartsOption(lastarg) {
+		// Complete the option
+		prefix, optname, islong := stripOptionPrefix(lastarg)
+		optname, split, argument := splitOption(prefix, optname, islong)
+
+		if argument == nil && !islong {
+			rname, n := utf8.DecodeRuneInString(optname)
+			sname := string(rname)
+
+			if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() {
+				ret = c.completeValue(opt.value, prefix+sname, optname[n:])
+			} else {
+				ret = c.completeNamesForShortPrefix(s, prefix, optname)
+			}
+		} else if argument != nil {
+			if islong {
+				opt = s.lookup.longNames[optname]
+			} else {
+				opt = s.lookup.shortNames[optname]
+			}
+
+			if opt != nil {
+				ret = c.completeValue(opt.value, prefix+optname+split, *argument)
+			}
+		} else if islong {
+			ret = c.completeNamesForLongPrefix(s, prefix, optname)
+		} else {
+			ret = c.completeNamesForShortPrefix(s, prefix, optname)
+		}
+	} else if len(s.positional) > 0 {
+		// Complete for positional argument
+		ret = c.completeValue(s.positional[0].value, "", lastarg)
+	} else if len(s.command.commands) > 0 {
+		// Complete for command
+		ret = c.completeCommands(s, lastarg)
+	}
+
+	sort.Sort(completions(ret))
+	return ret
+}
+
+func (c *completion) print(items []Completion, showDescriptions bool) {
+	if showDescriptions && len(items) > 1 {
+		maxl := 0
+
+		for _, v := range items {
+			if len(v.Item) > maxl {
+				maxl = len(v.Item)
+			}
+		}
+
+		for _, v := range items {
+			fmt.Printf("%s", v.Item)
+
+			if len(v.Description) > 0 {
+				fmt.Printf("%s  # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description)
+			}
+
+			fmt.Printf("\n")
+		}
+	} else {
+		for _, v := range items {
+			fmt.Println(v.Item)
+		}
+	}
+}
diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go
new file mode 100644
index 0000000..984aac8
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/convert.go
@@ -0,0 +1,348 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// to a string representation of the flag.
+type Marshaler interface {
+	// MarshalFlag marshals a flag value to its string representation.
+	MarshalFlag() (string, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a flag
+// argument to themselves. The provided value is directly passed from the
+// command line.
+type Unmarshaler interface {
+	// UnmarshalFlag unmarshals a string value representation to the flag
+	// value (which therefore needs to be a pointer receiver).
+	UnmarshalFlag(value string) error
+}
+
+func getBase(options multiTag, base int) (int, error) {
+	sbase := options.Get("base")
+
+	var err error
+	var ivbase int64
+
+	if sbase != "" {
+		ivbase, err = strconv.ParseInt(sbase, 10, 32)
+		base = int(ivbase)
+	}
+
+	return base, err
+}
+
+func convertMarshal(val reflect.Value) (bool, string, error) {
+	// Check first for the Marshaler interface
+	if val.Type().NumMethod() > 0 && val.CanInterface() {
+		if marshaler, ok := val.Interface().(Marshaler); ok {
+			ret, err := marshaler.MarshalFlag()
+			return true, ret, err
+		}
+	}
+
+	return false, "", nil
+}
+
+func convertToString(val reflect.Value, options multiTag) (string, error) {
+	if ok, ret, err := convertMarshal(val); ok {
+		return ret, err
+	}
+
+	tp := val.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		stringer := val.Interface().(fmt.Stringer)
+		return stringer.String(), nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		return val.String(), nil
+	case reflect.Bool:
+		if val.Bool() {
+			return "true", nil
+		}
+
+		return "false", nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatInt(val.Int(), base), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatUint(val.Uint(), base), nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil
+	case reflect.Slice:
+		if val.Len() == 0 {
+			return "", nil
+		}
+
+		ret := "["
+
+		for i := 0; i < val.Len(); i++ {
+			if i != 0 {
+				ret += ", "
+			}
+
+			item, err := convertToString(val.Index(i), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += item
+		}
+
+		return ret + "]", nil
+	case reflect.Map:
+		ret := "{"
+
+		for i, key := range val.MapKeys() {
+			if i != 0 {
+				ret += ", "
+			}
+
+			keyitem, err := convertToString(key, options)
+
+			if err != nil {
+				return "", err
+			}
+
+			item, err := convertToString(val.MapIndex(key), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += keyitem + ":" + item
+		}
+
+		return ret + "}", nil
+	case reflect.Ptr:
+		return convertToString(reflect.Indirect(val), options)
+	case reflect.Interface:
+		if !val.IsNil() {
+			return convertToString(val.Elem(), options)
+		}
+	}
+
+	return "", nil
+}
+
+func convertUnmarshal(val string, retval reflect.Value) (bool, error) {
+	if retval.Type().NumMethod() > 0 && retval.CanInterface() {
+		if unmarshaler, ok := retval.Interface().(Unmarshaler); ok {
+			if retval.IsNil() {
+				retval.Set(reflect.New(retval.Type().Elem()))
+
+				// Re-assign from the new value
+				unmarshaler = retval.Interface().(Unmarshaler)
+			}
+
+			return true, unmarshaler.UnmarshalFlag(val)
+		}
+	}
+
+	if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() {
+		return convertUnmarshal(val, retval.Addr())
+	}
+
+	if retval.Type().Kind() == reflect.Interface && !retval.IsNil() {
+		return convertUnmarshal(val, retval.Elem())
+	}
+
+	return false, nil
+}
+
+func convert(val string, retval reflect.Value, options multiTag) error {
+	if ok, err := convertUnmarshal(val, retval); ok {
+		return err
+	}
+
+	tp := retval.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		parsed, err := time.ParseDuration(val)
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(int64(parsed))
+		return nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		retval.SetString(val)
+	case reflect.Bool:
+		if val == "" {
+			retval.SetBool(true)
+		} else {
+			b, err := strconv.ParseBool(val)
+
+			if err != nil {
+				return err
+			}
+
+			retval.SetBool(b)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseInt(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(parsed)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseUint(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetUint(parsed)
+	case reflect.Float32, reflect.Float64:
+		parsed, err := strconv.ParseFloat(val, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetFloat(parsed)
+	case reflect.Slice:
+		elemtp := tp.Elem()
+
+		elemvalptr := reflect.New(elemtp)
+		elemval := reflect.Indirect(elemvalptr)
+
+		if err := convert(val, elemval, options); err != nil {
+			return err
+		}
+
+		retval.Set(reflect.Append(retval, elemval))
+	case reflect.Map:
+		parts := strings.SplitN(val, ":", 2)
+
+		key := parts[0]
+		var value string
+
+		if len(parts) == 2 {
+			value = parts[1]
+		}
+
+		keytp := tp.Key()
+		keyval := reflect.New(keytp)
+
+		if err := convert(key, keyval, options); err != nil {
+			return err
+		}
+
+		valuetp := tp.Elem()
+		valueval := reflect.New(valuetp)
+
+		if err := convert(value, valueval, options); err != nil {
+			return err
+		}
+
+		if retval.IsNil() {
+			retval.Set(reflect.MakeMap(tp))
+		}
+
+		retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval))
+	case reflect.Ptr:
+		if retval.IsNil() {
+			retval.Set(reflect.New(retval.Type().Elem()))
+		}
+
+		return convert(val, reflect.Indirect(retval), options)
+	case reflect.Interface:
+		if !retval.IsNil() {
+			return convert(val, retval.Elem(), options)
+		}
+	}
+
+	return nil
+}
+
+func isPrint(s string) bool {
+	for _, c := range s {
+		if !strconv.IsPrint(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func quoteIfNeeded(s string) string {
+	if !isPrint(s) {
+		return strconv.Quote(s)
+	}
+
+	return s
+}
+
+func quoteIfNeededV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = quoteIfNeeded(v)
+	}
+
+	return ret
+}
+
+func quoteV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = strconv.Quote(v)
+	}
+
+	return ret
+}
+
+func unquoteIfPossible(s string) (string, error) {
+	if len(s) == 0 || s[0] != '"' {
+		return s, nil
+	}
+
+	return strconv.Unquote(s)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go
new file mode 100644
index 0000000..05528d8
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/error.go
@@ -0,0 +1,134 @@
+package flags
+
+import (
+	"fmt"
+)
+
+// ErrorType represents the type of error.
+type ErrorType uint
+
+const (
+	// ErrUnknown indicates a generic error.
+	ErrUnknown ErrorType = iota
+
+	// ErrExpectedArgument indicates that an argument was expected.
+	ErrExpectedArgument
+
+	// ErrUnknownFlag indicates an unknown flag.
+	ErrUnknownFlag
+
+	// ErrUnknownGroup indicates an unknown group.
+	ErrUnknownGroup
+
+	// ErrMarshal indicates a marshalling error while converting values.
+	ErrMarshal
+
+	// ErrHelp indicates that the built-in help was shown (the error
+	// contains the help message).
+	ErrHelp
+
+	// ErrNoArgumentForBool indicates that an argument was given for a
+	// boolean flag (which does not take any arguments).
+	ErrNoArgumentForBool
+
+	// ErrRequired indicates that a required flag was not provided.
+	ErrRequired
+
+	// ErrShortNameTooLong indicates that a short flag name longer than one
+	// character was specified.
+	ErrShortNameTooLong
+
+	// ErrDuplicatedFlag indicates that a short or long flag has been
+	// defined more than once
+	ErrDuplicatedFlag
+
+	// ErrTag indicates an error while parsing flag tags.
+	ErrTag
+
+	// ErrCommandRequired indicates that a command was required but not
+	// specified
+	ErrCommandRequired
+
+	// ErrUnknownCommand indicates that an unknown command was specified.
+	ErrUnknownCommand
+
+	// ErrInvalidChoice indicates an invalid value for an option that only
+	// accepts values from a fixed set of choices.
+	ErrInvalidChoice
+
+	// ErrInvalidTag indicates an invalid tag or invalid use of an existing tag
+	ErrInvalidTag
+)
+
+func (e ErrorType) String() string {
+	switch e {
+	case ErrUnknown:
+		return "unknown"
+	case ErrExpectedArgument:
+		return "expected argument"
+	case ErrUnknownFlag:
+		return "unknown flag"
+	case ErrUnknownGroup:
+		return "unknown group"
+	case ErrMarshal:
+		return "marshal"
+	case ErrHelp:
+		return "help"
+	case ErrNoArgumentForBool:
+		return "no argument for bool"
+	case ErrRequired:
+		return "required"
+	case ErrShortNameTooLong:
+		return "short name too long"
+	case ErrDuplicatedFlag:
+		return "duplicated flag"
+	case ErrTag:
+		return "tag"
+	case ErrCommandRequired:
+		return "command required"
+	case ErrUnknownCommand:
+		return "unknown command"
+	case ErrInvalidChoice:
+		return "invalid choice"
+	case ErrInvalidTag:
+		return "invalid tag"
+	}
+
+	return "unrecognized error type"
+}
+
+// Error represents a parser error. The error returned from Parse is of this
+// type. The error contains both a Type and Message.
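+//
+// Callers can type-assert the returned error to inspect its Type. A minimal
+// sketch (assuming an opts struct has already been defined):
+//
+//	if _, err := flags.Parse(&opts); err != nil {
+//		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
+//			os.Exit(0)
+//		}
+//		os.Exit(1)
+//	}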
+type Error struct {
+	// The type of error
+	Type ErrorType
+
+	// The error message
+	Message string
+}
+
+// Error returns the error's message
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func newError(tp ErrorType, message string) *Error {
+	return &Error{
+		Type:    tp,
+		Message: message,
+	}
+}
+
+func newErrorf(tp ErrorType, format string, args ...interface{}) *Error {
+	return newError(tp, fmt.Sprintf(format, args...))
+}
+
+func wrapError(err error) *Error {
+	ret, ok := err.(*Error)
+
+	if !ok {
+		return newError(ErrUnknown, err.Error())
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go
new file mode 100644
index 0000000..889762d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/flags.go
@@ -0,0 +1,258 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go built-in flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+
+Supported features
+
+The following features are supported in go-flags:
+
+    Options with short names (-v)
+    Options with long names (--verbose)
+    Options with and without arguments (bool vs. other types)
+    Options with optional arguments and default values
+    Option default values from ENVIRONMENT_VARIABLES, including slice and map values
+    Multiple option groups each containing a set of options
+    Generate and print well-formatted help message
+    Passing remaining command line arguments after -- (optional)
+    Ignoring unknown command line options (optional)
+    Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+    Supports multiple short options -aux
+    Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+    Supports same option multiple times (can store in slice or last option counts)
+    Supports maps
+    Supports function callbacks
+    Supports namespaces for (nested) option groups
+
+Additional features specific to Windows:
+    Options with short names (/v)
+    Options with long names (/verbose)
+    Windows-style options with arguments use a colon as the delimiter
+    Modify generated help message with Windows-style / options
+    Windows style options can be disabled at build time using the "forceposix"
+    build tag
+
+
+Basic usage
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+    type Options struct {
+        Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+    }
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. For example, when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Slice options work exactly the same as primitive type options, except that
+whenever the option is encountered, a value is appended to the slice.
+
+Map options from string to primitive type are also supported. On the command
+line, you specify the value for such an option as key:value. For example
+
+    type Options struct {
+        AuthorInfo map[string]string `short:"a"`
+    }
+
+Then, the AuthorInfo map can be filled with something like
+-a name:Jesse -a "surname:van den Kieboom".
+
+Finally, for full control over the conversion between command line argument
+values and options, user defined types can choose to implement the Marshaler
+and Unmarshaler interfaces.
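+
+As an illustrative sketch (the Coordinate type here is hypothetical, not part
+of the package), an Unmarshaler and Marshaler implementation could look like:
+
+    type Coordinate struct {
+        X, Y int
+    }
+
+    // UnmarshalFlag parses an "x,y" pair given on the command line.
+    func (c *Coordinate) UnmarshalFlag(value string) error {
+        parts := strings.Split(value, ",")
+
+        if len(parts) != 2 {
+            return errors.New("expected two comma separated values")
+        }
+
+        x, err := strconv.Atoi(parts[0])
+        if err != nil {
+            return err
+        }
+
+        y, err := strconv.Atoi(parts[1])
+        if err != nil {
+            return err
+        }
+
+        c.X, c.Y = x, y
+        return nil
+    }
+
+    // MarshalFlag renders the value back into its command line form.
+    func (c Coordinate) MarshalFlag() (string, error) {
+        return fmt.Sprintf("%d,%d", c.X, c.Y), nil
+    }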
+
+
+Available field tags
+
+The following is a list of tags for struct fields supported by go-flags:
+
+    short:            the short name of the option (single character)
+    long:             the long name of the option
+    required:         if non empty, makes the option required to appear on the command
+                      line. If a required option is not present, the parser will
+                      return ErrRequired (optional)
+    description:      the description of the option (optional)
+    long-description: the long description of the option. Currently only
+                      displayed in generated man pages (optional)
+    no-flag:          if non-empty, this field is ignored as an option (optional)
+
+    optional:       if non-empty, makes the argument of the option optional. When an
+                    argument is optional it can only be specified using
+                    --option=argument (optional)
+    optional-value: the value of an optional option when the option occurs
+                    without an argument. This tag can be specified multiple
+                    times in the case of maps or slices (optional)
+    default:        the default value of an option. This tag can be specified
+                    multiple times in the case of slices or maps (optional)
+    default-mask:   when specified, this value will be displayed in the help
+                    instead of the actual default value. This is useful
+                    mostly for hiding otherwise sensitive information from
+                    showing up in the help. If default-mask takes the special
+                    value "-", then no default value will be shown at all
+                    (optional)
+    env:            the default value of the option is overridden from the
+                    specified environment variable, if one has been defined.
+                    (optional)
+    env-delim:      the 'env' default value from environment is split into
+                    multiple values with the given delimiter string, use with
+                    slices and maps (optional)
+    value-name:     the name of the argument value (to be shown in the help)
+                    (optional)
+    choice:         limits the values for an option to a set of values.
+                    This tag can be specified multiple times (optional)
+    hidden:         if non-empty, the option is not visible in the help or man page.
+
+    base: a base (radix) used to convert strings to integer values, the
+          default base is 10 (i.e. decimal) (optional)
+
+    ini-name:       the explicit ini option name (optional)
+    no-ini:         if non-empty this field is ignored as an ini option
+                    (optional)
+
+    group:                when specified on a struct field, makes the struct
+                          field a separate group with the given name (optional)
+    namespace:            when specified on a group struct field, the namespace
+                          gets prepended to every option's long name and
+                          subgroup's namespace of this group, separated by
+                          the parser's namespace delimiter (optional)
+    command:              when specified on a struct field, makes the struct
+                          field a (sub)command with the given name (optional)
+    subcommands-optional: when specified on a command struct field, makes
+                          any subcommands of that command optional (optional)
+    alias:                when specified on a command struct field, adds the
+                          specified name as an alias for the command. Can be
+                          specified multiple times to add more than one
+                          alias (optional)
+    positional-args:      when specified on a field with a struct type,
+                          uses the fields of that struct to parse remaining
+                          positional command line arguments into (in order
+                          of the fields). If a field has a slice type,
+                          then all remaining arguments will be added to it.
+                          Positional arguments are optional by default,
+                          unless the "required" tag is specified together
+                          with the "positional-args" tag. The "required" tag
+                          can also be set on the individual rest argument
+                          fields, to require only the first N positional
+                          arguments. If the "required" tag is set on the
+                          rest arguments slice, then its value determines
+                          the minimum number of rest arguments that need to
+                          be provided (e.g. `required:"2"`) (optional)
+    positional-arg-name:  used on a field in a positional argument struct; name
+                          of the positional argument placeholder to be shown in
+                          the help (optional)
+
+Either the `short:` tag or the `long:` tag must be specified to make the field
+eligible as an option.
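+
+For illustration only, a hypothetical options struct combining several of the
+tags above might look like:
+
+    type ServerOptions struct {
+        // --host, with a default value that can be overridden by $HOST
+        Host string `long:"host" description:"Host to listen on" default:"localhost" env:"HOST"`
+
+        // --port, with a placeholder value name shown in the help
+        Port int `long:"port" description:"Port to listen on" value-name:"PORT" default:"8080"`
+
+        // -l / --level, restricted to a fixed set of choices
+        Level string `short:"l" long:"level" description:"Log level" choice:"debug" choice:"info" choice:"error" default:"info"`
+
+        // --secret must be present on the command line
+        Secret string `long:"secret" description:"API secret" required:"true"`
+    }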
+
+
+Option groups
+
+Option groups are a simple way to semantically separate your options. All
+options in a particular group are shown together in the help under the name
+of the group. Namespaces can be used to specify option long names more
+precisely and to emphasize the options' affiliation with their group.
+
+There are currently three ways to specify option groups.
+
+    1. Use NewNamedParser specifying the various option groups.
+    2. Use AddGroup to add a group to an existing parser.
+    3. Add a struct field to the top-level options annotated with the
+       group:"group-name" tag.
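+
+For example, the third approach could be sketched roughly as follows (the
+group name and fields are illustrative):
+
+    type Options struct {
+        Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+
+        Connection struct {
+            Host string `long:"host" description:"Host" default:"localhost"`
+            Port int    `long:"port" description:"Port" default:"5432"`
+        } `group:"Connection Options" namespace:"connection"`
+    }
+
+With the namespace set and the default namespace delimiter, the options above
+appear as --connection.host and --connection.port in the generated help.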
+
+
+
+Commands
+
+The flags package also has basic support for commands. Commands are often
+used in monolithic applications that support various commands or actions.
+Take git, for example: add, commit, checkout, etc. are all commands. Using
+commands you can easily separate multiple functions of your application.
+
+There are currently two ways to specify a command.
+
+    1. Use AddCommand on an existing parser.
+    2. Add a struct field to your options struct annotated with the
+       command:"command-name" tag.
+
+The most common, idiomatic way to implement commands is to define a global
+parser instance and implement each command in a separate file. These
+command files should define a go init function which calls AddCommand on
+the global parser.
+
+When parsing ends and there is an active command and that command implements
+the Commander interface, then its Execute method will be run with the
+remaining command line arguments.
+
+Command structs can have options which become valid to parse after the
+command has been specified on the command line, in addition to the options
+of all the parent commands. For example, considering a -v flag on the parser
+and an add command, the following are equivalent:
+
+    ./app -v add
+    ./app add -v
+
+However, if the -v flag is defined on the add command, then the first of
+the two examples above would fail since the -v flag is not defined before
+the add command.
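+
+A minimal sketch of defining a command via the command struct tag (the
+AddCommand type and field names below are illustrative, not part of the
+package):
+
+    type AddCommand struct {
+        All bool `short:"a" long:"all" description:"Add all files"`
+    }
+
+    // Execute is called with the remaining arguments once the command has
+    // been selected, satisfying the Commander interface.
+    func (x *AddCommand) Execute(args []string) error {
+        fmt.Printf("Adding (all=%v): %#v\n", x.All, args)
+        return nil
+    }
+
+    var options struct {
+        Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+
+        Add AddCommand `command:"add" description:"Add a file"`
+    }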
+
+
+Completion
+
+go-flags has builtin support to provide bash completion of flags, commands
+and argument values. To use completion, the binary which uses go-flags
+can be invoked in a special environment to list completion of the current
+command line argument. It should be noted that this `executes` your application,
+and it is up to the user to make sure there are no negative side effects (for
+example from init functions).
+
+Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion
+by replacing the argument parsing routine with the completion routine which
+outputs completions for the passed arguments. The basic invocation to
+complete a set of arguments is therefore:
+
+    GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3
+
+where `completion-example` is the binary, `arg1` and `arg2` are
+the current arguments, and `arg3` (the last argument) is the argument
+to be completed. If GO_FLAGS_COMPLETION is set to "verbose", then
+descriptions of possible completion items will also be shown when there
+is more than one completion item.
+
+To use this with bash completion, a simple file can be written which
+calls the binary which supports go-flags completion:
+
+    _completion_example() {
+        # All arguments except the first one
+        args=("${COMP_WORDS[@]:1:$COMP_CWORD}")
+
+        # Only split on newlines
+        local IFS=$'\n'
+
+        # Call completion (note that the first element of COMP_WORDS is
+        # the executable itself)
+        COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}"))
+        return 0
+    }
+
+    complete -F _completion_example completion-example
+
+Completion requires the parser option PassDoubleDash, which is therefore
+enforced whenever the environment variable GO_FLAGS_COMPLETION is set.
+
+Customized completion for argument values is supported by implementing
+the flags.Completer interface for the argument value type. An example
+of a type which does so is the flags.Filename type, an alias of string
+allowing simple filename completion. A slice or array argument value
+whose element type implements flags.Completer will also be completed.
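+
+As a rough sketch (the Environment type is hypothetical), a custom completer
+for an argument value could look like:
+
+    type Environment string
+
+    // Complete returns the candidate environments matching the given prefix.
+    func (e Environment) Complete(match string) []flags.Completion {
+        candidates := []string{"development", "staging", "production"}
+
+        var ret []flags.Completion
+        for _, c := range candidates {
+            if strings.HasPrefix(c, match) {
+                ret = append(ret, flags.Completion{Item: c})
+            }
+        }
+
+        return ret
+    }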
+*/
+package flags
diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go
new file mode 100644
index 0000000..9e057ab
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/group.go
@@ -0,0 +1,406 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// ErrNotPointerToStruct indicates that a provided data container is not
+// a pointer to a struct. Only pointers to structs are valid data containers
+// for options.
+var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct")
+
+// Group represents an option group. Option groups can be used to logically
+// group options together under a description. Groups are only used to provide
+// more structure to options both for the user (as displayed in the help message)
+// and for you, since groups can be nested.
+type Group struct {
+	// A short description of the group. The
+	// short description is primarily used in the built-in generated help
+	// message
+	ShortDescription string
+
+	// A long description of the group. The long
+	// description is primarily used to present information on commands
+	// (Command embeds Group) in the built-in generated help and man pages.
+	LongDescription string
+
+	// The namespace of the group
+	Namespace string
+
+	// If true, the group is not displayed in the help or man page
+	Hidden bool
+
+	// The parent of the group or nil if it has no parent
+	parent interface{}
+
+	// All the options in the group
+	options []*Option
+
+	// All the subgroups
+	groups []*Group
+
+	// Whether the group represents the built-in help group
+	isBuiltinHelp bool
+
+	data interface{}
+}
+
+type scanHandler func(reflect.Value, *reflect.StructField) (bool, error)
+
+// AddGroup adds a new group to the group with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = g
+
+	if err := group.scan(); err != nil {
+		return nil, err
+	}
+
+	g.groups = append(g.groups, group)
+	return group, nil
+}
+
+// Groups returns the list of groups embedded in this group.
+func (g *Group) Groups() []*Group {
+	return g.groups
+}
+
+// Options returns the list of options in this group.
+func (g *Group) Options() []*Option {
+	return g.options
+}
+
+// Find locates the subgroup with the given short description and returns it.
+// If no such group can be found Find will return nil. Note that the description
+// is matched case insensitively.
+func (g *Group) Find(shortDescription string) *Group {
+	lshortDescription := strings.ToLower(shortDescription)
+
+	var ret *Group
+
+	g.eachGroup(func(gg *Group) {
+		if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription {
+			ret = gg
+		}
+	})
+
+	return ret
+}
+
+func (g *Group) findOption(matcher func(*Option) bool) (option *Option) {
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if option == nil && matcher(opt) {
+				option = opt
+			}
+		}
+	})
+
+	return option
+}
+
+// FindOptionByLongName finds an option that is part of the group, or any of its
+// subgroups, by matching its long name (including the option namespace).
+func (g *Group) FindOptionByLongName(longName string) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.LongNameWithNamespace() == longName
+	})
+}
+
+// FindOptionByShortName finds an option that is part of the group, or any of
+// its subgroups, by matching its short name.
+func (g *Group) FindOptionByShortName(shortName rune) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.ShortName == shortName
+	})
+}
+
+func newGroup(shortDescription string, longDescription string, data interface{}) *Group {
+	return &Group{
+		ShortDescription: shortDescription,
+		LongDescription:  longDescription,
+
+		data: data,
+	}
+}
+
+func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option {
+	prio := 0
+	var retopt *Option
+
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if namematch != nil && namematch(opt, name) && prio < 4 {
+				retopt = opt
+				prio = 4
+			}
+
+			if name == opt.field.Name && prio < 3 {
+				retopt = opt
+				prio = 3
+			}
+
+			if name == opt.LongNameWithNamespace() && prio < 2 {
+				retopt = opt
+				prio = 2
+			}
+
+			if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 {
+				retopt = opt
+				prio = 1
+			}
+		}
+	})
+
+	return retopt
+}
+
+func (g *Group) eachGroup(f func(*Group)) {
+	f(g)
+
+	for _, gg := range g.groups {
+		gg.eachGroup(f)
+	}
+}
+
+func isStringFalsy(s string) bool {
+	return s == "" || s == "false" || s == "no" || s == "0"
+}
+
+func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error {
+	stype := realval.Type()
+
+	if sfield != nil {
+		if ok, err := handler(realval, sfield); err != nil {
+			return err
+		} else if ok {
+			return nil
+		}
+	}
+
+	for i := 0; i < stype.NumField(); i++ {
+		field := stype.Field(i)
+
+		// PkgPath is set only for non-exported fields, which we ignore
+		if field.PkgPath != "" && !field.Anonymous {
+			continue
+		}
+
+		mtag := newMultiTag(string(field.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return err
+		}
+
+		// Skip fields with the no-flag tag
+		if mtag.Get("no-flag") != "" {
+			continue
+		}
+
+		// Dive deep into structs or pointers to structs
+		kind := field.Type.Kind()
+		fld := realval.Field(i)
+
+		if kind == reflect.Struct {
+			if err := g.scanStruct(fld, &field, handler); err != nil {
+				return err
+			}
+		} else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
+			flagCountBefore := len(g.options) + len(g.groups)
+
+			if fld.IsNil() {
+				fld = reflect.New(fld.Type().Elem())
+			}
+
+			if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil {
+				return err
+			}
+
+			if len(g.options)+len(g.groups) != flagCountBefore {
+				realval.Field(i).Set(fld)
+			}
+		}
+
+		longname := mtag.Get("long")
+		shortname := mtag.Get("short")
+
+		// Need at least either a short or long name
+		if longname == "" && shortname == "" && mtag.Get("ini-name") == "" {
+			continue
+		}
+
+		short := rune(0)
+		rc := utf8.RuneCountInString(shortname)
+
+		if rc > 1 {
+			return newErrorf(ErrShortNameTooLong,
+				"short names can only be 1 character long, not `%s'",
+				shortname)
+
+		} else if rc == 1 {
+			short, _ = utf8.DecodeRuneInString(shortname)
+		}
+
+		description := mtag.Get("description")
+		def := mtag.GetMany("default")
+
+		optionalValue := mtag.GetMany("optional-value")
+		valueName := mtag.Get("value-name")
+		defaultMask := mtag.Get("default-mask")
+
+		optional := !isStringFalsy(mtag.Get("optional"))
+		required := !isStringFalsy(mtag.Get("required"))
+		choices := mtag.GetMany("choice")
+		hidden := !isStringFalsy(mtag.Get("hidden"))
+
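+		// For illustration only: a hypothetical field declared as
+		//
+		//   Port int `short:"p" long:"port" description:"Listen port" default:"8080" required:"yes"`
+		//
+		// would yield the tag values read above and populate the Option
+		// constructed below.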
+		option := &Option{
+			Description:      description,
+			ShortName:        short,
+			LongName:         longname,
+			Default:          def,
+			EnvDefaultKey:    mtag.Get("env"),
+			EnvDefaultDelim:  mtag.Get("env-delim"),
+			OptionalArgument: optional,
+			OptionalValue:    optionalValue,
+			Required:         required,
+			ValueName:        valueName,
+			DefaultMask:      defaultMask,
+			Choices:          choices,
+			Hidden:           hidden,
+
+			group: g,
+
+			field: field,
+			value: realval.Field(i),
+			tag:   mtag,
+		}
+
+		if option.isBool() && option.Default != nil {
+			return newErrorf(ErrInvalidTag,
+				"boolean flag `%s' may not have default values, they always default to `false' and can only be turned on",
+				option.shortAndLongName())
+		}
+
+		g.options = append(g.options, option)
+	}
+
+	return nil
+}
+
+func (g *Group) checkForDuplicateFlags() *Error {
+	shortNames := make(map[rune]*Option)
+	longNames := make(map[string]*Option)
+
+	var duplicateError *Error
+
+	g.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.LongName != "" {
+				longName := option.LongNameWithNamespace()
+
+				if otherOption, ok := longNames[longName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption)
+					return
+				}
+				longNames[longName] = option
+			}
+			if option.ShortName != 0 {
+				if otherOption, ok := shortNames[option.ShortName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption)
+					return
+				}
+				shortNames[option.ShortName] = option
+			}
+		}
+	})
+
+	return duplicateError
+}
+
+func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+	mtag := newMultiTag(string(sfield.Tag))
+
+	if err := mtag.Parse(); err != nil {
+		return true, err
+	}
+
+	subgroup := mtag.Get("group")
+
+	if len(subgroup) != 0 {
+		var ptrval reflect.Value
+
+		if realval.Kind() == reflect.Ptr {
+			ptrval = realval
+
+			if ptrval.IsNil() {
+				ptrval.Set(reflect.New(ptrval.Type()))
+			}
+		} else {
+			ptrval = realval.Addr()
+		}
+
+		description := mtag.Get("description")
+
+		group, err := g.AddGroup(subgroup, description, ptrval.Interface())
+
+		if err != nil {
+			return true, err
+		}
+
+		group.Namespace = mtag.Get("namespace")
+		group.Hidden = mtag.Get("hidden") != ""
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (g *Group) scanType(handler scanHandler) error {
+	// Get all the public fields in the data struct
+	ptrval := reflect.ValueOf(g.data)
+
+	if ptrval.Type().Kind() != reflect.Ptr {
+		panic(ErrNotPointerToStruct)
+	}
+
+	stype := ptrval.Type().Elem()
+
+	if stype.Kind() != reflect.Struct {
+		panic(ErrNotPointerToStruct)
+	}
+
+	realval := reflect.Indirect(ptrval)
+
+	if err := g.scanStruct(realval, nil, handler); err != nil {
+		return err
+	}
+
+	if err := g.checkForDuplicateFlags(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (g *Group) scan() error {
+	return g.scanType(g.scanSubGroupHandler)
+}
+
+func (g *Group) groupByName(name string) *Group {
+	if len(name) == 0 {
+		return g
+	}
+
+	return g.Find(name)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go
new file mode 100644
index 0000000..d380305
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/help.go
@@ -0,0 +1,491 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"unicode/utf8"
+)
+
+type alignmentInfo struct {
+	maxLongLen      int
+	hasShort        bool
+	hasValueName    bool
+	terminalColumns int
+	indent          bool
+}
+
+const (
+	paddingBeforeOption                 = 2
+	distanceBetweenOptionAndDescription = 2
+)
+
+func (a *alignmentInfo) descriptionStart() int {
+	ret := a.maxLongLen + distanceBetweenOptionAndDescription
+
+	if a.hasShort {
+		ret += 2
+	}
+
+	if a.maxLongLen > 0 {
+		ret += 4
+	}
+
+	if a.hasValueName {
+		ret += 3
+	}
+
+	return ret
+}
+
+func (a *alignmentInfo) updateLen(name string, indent bool) {
+	l := utf8.RuneCountInString(name)
+
+	if indent {
+		l = l + 4
+	}
+
+	if l > a.maxLongLen {
+		a.maxLongLen = l
+	}
+}
+
+func (p *Parser) getAlignmentInfo() alignmentInfo {
+	ret := alignmentInfo{
+		maxLongLen:      0,
+		hasShort:        false,
+		hasValueName:    false,
+		terminalColumns: getTerminalColumns(),
+	}
+
+	if ret.terminalColumns <= 0 {
+		ret.terminalColumns = 80
+	}
+
+	var prevcmd *Command
+
+	p.eachActiveGroup(func(c *Command, grp *Group) {
+		if c != prevcmd {
+			for _, arg := range c.args {
+				ret.updateLen(arg.Name, c != p.Command)
+			}
+		}
+
+		for _, info := range grp.options {
+			if !info.canCli() {
+				continue
+			}
+
+			if info.ShortName != 0 {
+				ret.hasShort = true
+			}
+
+			if len(info.ValueName) > 0 {
+				ret.hasValueName = true
+			}
+
+			l := info.LongNameWithNamespace() + info.ValueName
+
+			if len(info.Choices) != 0 {
+				l += "[" + strings.Join(info.Choices, "|") + "]"
+			}
+
+			ret.updateLen(l, c != p.Command)
+		}
+	})
+
+	return ret
+}
+
+func wrapText(s string, l int, prefix string) string {
+	var ret string
+
+	if l < 10 {
+		l = 10
+	}
+
+	// Basic text wrapping of s at spaces to fit in l
+	lines := strings.Split(s, "\n")
+
+	for _, line := range lines {
+		var retline string
+
+		line = strings.TrimSpace(line)
+
+		for len(line) > l {
+			// Try to split on space
+			suffix := ""
+
+			pos := strings.LastIndex(line[:l], " ")
+
+			if pos < 0 {
+				pos = l - 1
+				suffix = "-\n"
+			}
+
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += strings.TrimSpace(line[:pos]) + suffix
+			line = strings.TrimSpace(line[pos:])
+		}
+
+		if len(line) > 0 {
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += line
+		}
+
+		if len(ret) > 0 {
+			ret += "\n"
+
+			if len(retline) > 0 {
+				ret += prefix
+			}
+		}
+
+		ret += retline
+	}
+
+	return ret
+}
+
+func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) {
+	line := &bytes.Buffer{}
+
+	prefix := paddingBeforeOption
+
+	if info.indent {
+		prefix += 4
+	}
+
+	if option.Hidden {
+		return
+	}
+
+	line.WriteString(strings.Repeat(" ", prefix))
+
+	if option.ShortName != 0 {
+		line.WriteRune(defaultShortOptDelimiter)
+		line.WriteRune(option.ShortName)
+	} else if info.hasShort {
+		line.WriteString("  ")
+	}
+
+	descstart := info.descriptionStart() + paddingBeforeOption
+
+	if len(option.LongName) > 0 {
+		if option.ShortName != 0 {
+			line.WriteString(", ")
+		} else if info.hasShort {
+			line.WriteString("  ")
+		}
+
+		line.WriteString(defaultLongOptDelimiter)
+		line.WriteString(option.LongNameWithNamespace())
+	}
+
+	if option.canArgument() {
+		line.WriteRune(defaultNameArgDelimiter)
+
+		if len(option.ValueName) > 0 {
+			line.WriteString(option.ValueName)
+		}
+
+		if len(option.Choices) > 0 {
+			line.WriteString("[" + strings.Join(option.Choices, "|") + "]")
+		}
+	}
+
+	written := line.Len()
+	line.WriteTo(writer)
+
+	if option.Description != "" {
+		dw := descstart - written
+		writer.WriteString(strings.Repeat(" ", dw))
+
+		var def string
+
+		if len(option.DefaultMask) != 0 {
+			if option.DefaultMask != "-" {
+				def = option.DefaultMask
+			}
+		} else {
+			def = option.defaultLiteral
+		}
+
+		var envDef string
+		if option.EnvDefaultKey != "" {
+			var envPrintable string
+			if runtime.GOOS == "windows" {
+				envPrintable = "%" + option.EnvDefaultKey + "%"
+			} else {
+				envPrintable = "$" + option.EnvDefaultKey
+			}
+			envDef = fmt.Sprintf(" [%s]", envPrintable)
+		}
+
+		var desc string
+
+		if def != "" {
+			desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef)
+		} else {
+			desc = option.Description + envDef
+		}
+
+		writer.WriteString(wrapText(desc,
+			info.terminalColumns-descstart,
+			strings.Repeat(" ", descstart)))
+	}
+
+	writer.WriteString("\n")
+}
+
+func maxCommandLength(s []*Command) int {
+	if len(s) == 0 {
+		return 0
+	}
+
+	ret := len(s[0].Name)
+
+	for _, v := range s[1:] {
+		l := len(v.Name)
+
+		if l > ret {
+			ret = l
+		}
+	}
+
+	return ret
+}
+
+// WriteHelp writes a help message containing all the possible options and
+// their descriptions to the provided writer. Note that the HelpFlag parser
+// option provides a convenient way to add a -h/--help option group to the
+// command line parser, which will automatically show the help messages using
+// this method.
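+//
+// A minimal usage sketch (the opts struct is hypothetical):
+//
+//     parser := flags.NewParser(&opts, flags.Default)
+//     parser.WriteHelp(os.Stdout)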
+func (p *Parser) WriteHelp(writer io.Writer) {
+	if writer == nil {
+		return
+	}
+
+	wr := bufio.NewWriter(writer)
+	aligninfo := p.getAlignmentInfo()
+
+	cmd := p.Command
+
+	for cmd.Active != nil {
+		cmd = cmd.Active
+	}
+
+	if p.Name != "" {
+		wr.WriteString("Usage:\n")
+		wr.WriteString(" ")
+
+		allcmd := p.Command
+
+		for allcmd != nil {
+			var usage string
+
+			if allcmd == p.Command {
+				if len(p.Usage) != 0 {
+					usage = p.Usage
+				} else if p.Options&HelpFlag != 0 {
+					usage = "[OPTIONS]"
+				}
+			} else if us, ok := allcmd.data.(Usage); ok {
+				usage = us.Usage()
+			} else if allcmd.hasCliOptions() {
+				usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name)
+			}
+
+			if len(usage) != 0 {
+				fmt.Fprintf(wr, " %s %s", allcmd.Name, usage)
+			} else {
+				fmt.Fprintf(wr, " %s", allcmd.Name)
+			}
+
+			if len(allcmd.args) > 0 {
+				fmt.Fprintf(wr, " ")
+			}
+
+			for i, arg := range allcmd.args {
+				if i != 0 {
+					fmt.Fprintf(wr, " ")
+				}
+
+				name := arg.Name
+
+				if arg.isRemaining() {
+					name = name + "..."
+				}
+
+				if !allcmd.ArgsRequired {
+					fmt.Fprintf(wr, "[%s]", name)
+				} else {
+					fmt.Fprintf(wr, "%s", name)
+				}
+			}
+
+			if allcmd.Active == nil && len(allcmd.commands) > 0 {
+				var co, cc string
+
+				if allcmd.SubcommandsOptional {
+					co, cc = "[", "]"
+				} else {
+					co, cc = "<", ">"
+				}
+
+				visibleCommands := allcmd.visibleCommands()
+
+				if len(visibleCommands) > 3 {
+					fmt.Fprintf(wr, " %scommand%s", co, cc)
+				} else {
+					subcommands := allcmd.sortedVisibleCommands()
+					names := make([]string, len(subcommands))
+
+					for i, subc := range subcommands {
+						names[i] = subc.Name
+					}
+
+					fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc)
+				}
+			}
+
+			allcmd = allcmd.Active
+		}
+
+		fmt.Fprintln(wr)
+
+		if len(cmd.LongDescription) != 0 {
+			fmt.Fprintln(wr)
+
+			t := wrapText(cmd.LongDescription,
+				aligninfo.terminalColumns,
+				"")
+
+			fmt.Fprintln(wr, t)
+		}
+	}
+
+	c := p.Command
+
+	for c != nil {
+		printcmd := c != p.Command
+
+		c.eachGroup(func(grp *Group) {
+			first := true
+
+			// Skip built-in help group for all commands except the top-level
+			// parser
+			if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) {
+				return
+			}
+
+			for _, info := range grp.options {
+				if !info.canCli() || info.Hidden {
+					continue
+				}
+
+				if printcmd {
+					fmt.Fprintf(wr, "\n[%s command options]\n", c.Name)
+					aligninfo.indent = true
+					printcmd = false
+				}
+
+				if first && cmd.Group != grp {
+					fmt.Fprintln(wr)
+
+					if aligninfo.indent {
+						wr.WriteString("    ")
+					}
+
+					fmt.Fprintf(wr, "%s:\n", grp.ShortDescription)
+					first = false
+				}
+
+				p.writeHelpOption(wr, info, aligninfo)
+			}
+		})
+
+		var args []*Arg
+		for _, arg := range c.args {
+			if arg.Description != "" {
+				args = append(args, arg)
+			}
+		}
+
+		if len(args) > 0 {
+			if c == p.Command {
+				fmt.Fprintf(wr, "\nArguments:\n")
+			} else {
+				fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name)
+			}
+
+			descStart := aligninfo.descriptionStart() + paddingBeforeOption
+
+			for _, arg := range args {
+				argPrefix := strings.Repeat(" ", paddingBeforeOption)
+				argPrefix += arg.Name
+
+				if len(arg.Description) > 0 {
+					argPrefix += ":"
+					wr.WriteString(argPrefix)
+
+					// Space between "arg:" and the description start
+					descPadding := strings.Repeat(" ", descStart-len(argPrefix))
+					// How much space the description gets before wrapping
+					descWidth := aligninfo.terminalColumns - 1 - descStart
+					// Whitespace to which we can indent new description lines
+					descPrefix := strings.Repeat(" ", descStart)
+
+					wr.WriteString(descPadding)
+					wr.WriteString(wrapText(arg.Description, descWidth, descPrefix))
+				} else {
+					wr.WriteString(argPrefix)
+				}
+
+				fmt.Fprintln(wr)
+			}
+		}
+
+		c = c.Active
+	}
+
+	scommands := cmd.sortedVisibleCommands()
+
+	if len(scommands) > 0 {
+		maxnamelen := maxCommandLength(scommands)
+
+		fmt.Fprintln(wr)
+		fmt.Fprintln(wr, "Available commands:")
+
+		for _, c := range scommands {
+			fmt.Fprintf(wr, "  %s", c.Name)
+
+			if len(c.ShortDescription) > 0 {
+				pad := strings.Repeat(" ", maxnamelen-len(c.Name))
+				fmt.Fprintf(wr, "%s  %s", pad, c.ShortDescription)
+
+				if len(c.Aliases) > 0 {
+					fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", "))
+				}
+
+			}
+
+			fmt.Fprintln(wr)
+		}
+	}
+
+	wr.Flush()
+}
diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go
new file mode 100644
index 0000000..e714d3d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/ini.go
@@ -0,0 +1,597 @@
+package flags
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// IniError contains location information on where an error occurred.
+type IniError struct {
+	// The error message.
+	Message string
+
+	// The filename of the file in which the error occurred.
+	File string
+
+	// The line number at which the error occurred.
+	LineNumber uint
+}
+
+// Error provides a "file:line: message" formatted message of the ini error.
+func (x *IniError) Error() string {
+	return fmt.Sprintf(
+		"%s:%d: %s",
+		x.File,
+		x.LineNumber,
+		x.Message,
+	)
+}
+
+// IniOptions are options that control how ini contents are written.
+type IniOptions uint
+
+const (
+	// IniNone indicates no options.
+	IniNone IniOptions = 0
+
+	// IniIncludeDefaults indicates that default values should be written.
+	IniIncludeDefaults = 1 << iota
+
+	// IniCommentDefaults indicates that, if IniIncludeDefaults is used,
+	// options with default values are written but commented out.
+	IniCommentDefaults
+
+	// IniIncludeComments indicates that comments containing the description
+	// of an option should be written.
+	IniIncludeComments
+
+	// IniDefault provides a default set of options.
+	IniDefault = IniIncludeComments
+)
+
+// IniParser is a utility to read and write flags options from and to ini
+// formatted strings.
+type IniParser struct {
+	ParseAsDefaults bool // override default flags
+
+	parser *Parser
+}
+
+type iniValue struct {
+	Name       string
+	Value      string
+	Quoted     bool
+	LineNumber uint
+}
+
+type iniSection []iniValue
+
+type ini struct {
+	File     string
+	Sections map[string]iniSection
+}
+
+// NewIniParser creates a new ini parser for a given Parser.
+func NewIniParser(p *Parser) *IniParser {
+	return &IniParser{
+		parser: p,
+	}
+}
+
+// IniParse is a convenience function to parse command line options with default
+// settings from an ini formatted file. The provided data is a pointer to a struct
+// representing the default option group (named "Application Options"). For
+// more control, use flags.NewParser.
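+//
+// An illustrative sketch (the cfg struct and file name are hypothetical):
+//
+//     var cfg struct {
+//         Verbose bool `long:"verbose" description:"Verbose output"`
+//     }
+//
+//     if err := flags.IniParse("app.ini", &cfg); err != nil {
+//         // handle the flags.Error or flags.IniError
+//     }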
+func IniParse(filename string, data interface{}) error {
+	p := NewParser(data, Default)
+
+	return NewIniParser(p).ParseFile(filename)
+}
+
+// ParseFile parses flags from an ini formatted file. See Parse for more
+// information on the ini file format. The returned errors can be of the type
+// flags.Error or flags.IniError.
+func (i *IniParser) ParseFile(filename string) error {
+	ini, err := readIniFromFile(filename)
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// Parse parses flags from an ini format. You can use ParseFile as a
+// convenience function to parse from a filename instead of a general
+// io.Reader.
+//
+// The format of the ini file is as follows:
+//
+//     [Option group name]
+//     option = value
+//
+// Each section in the ini file represents an option group or command in the
+// flags parser. The default flags parser option group (i.e. when using
+// flags.Parse) is named 'Application Options'. The ini option name is matched
+// in the following order:
+//
+//     1. Compared to the ini-name tag on the option struct field (if present)
+//     2. Compared to the struct field name
+//     3. Compared to the option long name (if present)
+//     4. Compared to the option short name (if present)
+//
+// Sections for nested groups and commands can be addressed using a dot `.'
+// namespacing notation (i.e. [subcommand.Options]). Group section names are
+// matched case-insensitively.
+//
+// The returned errors can be of the type flags.Error or flags.IniError.
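+//
+// For example, an option declared as `long:"verbose"` in the default group
+// could be set from ini content such as (illustrative values only):
+//
+//     [Application Options]
+//     verbose = true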
+func (i *IniParser) Parse(reader io.Reader) error {
+	ini, err := readIni(reader, "")
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// WriteFile writes the flags as ini format into a file. See Write
+// for more information. The returned error occurs when the specified file
+// could not be opened for writing.
+func (i *IniParser) WriteFile(filename string, options IniOptions) error {
+	return writeIniToFile(i, filename, options)
+}
+
+// Write writes the current values of all the flags to an ini format.
+// See Parse for more information on the ini file format. You typically
+// call this only after settings have been parsed since the default values of each
+// option are stored just before parsing the flags (this is only relevant when
+// IniIncludeDefaults is _not_ set in options).
+func (i *IniParser) Write(writer io.Writer, options IniOptions) {
+	writeIni(i, writer, options)
+}
+
+func readFullLine(reader *bufio.Reader) (string, error) {
+	var line []byte
+
+	for {
+		l, more, err := reader.ReadLine()
+
+		if err != nil {
+			return "", err
+		}
+
+		if line == nil && !more {
+			return string(l), nil
+		}
+
+		line = append(line, l...)
+
+		if !more {
+			break
+		}
+	}
+
+	return string(line), nil
+}
+
+func optionIniName(option *Option) string {
+	name := option.tag.Get("_read-ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	name = option.tag.Get("ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	return option.field.Name
+}
+
+func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) {
+	var sname string
+
+	if len(namespace) != 0 {
+		sname = namespace
+	}
+
+	if cmd.Group != group && len(group.ShortDescription) != 0 {
+		if len(sname) != 0 {
+			sname += "."
+		}
+
+		sname += group.ShortDescription
+	}
+
+	sectionwritten := false
+	comments := (options & IniIncludeComments) != IniNone
+
+	for _, option := range group.options {
+		if option.isFunc() || option.Hidden {
+			continue
+		}
+
+		if len(option.tag.Get("no-ini")) != 0 {
+			continue
+		}
+
+		val := option.value
+
+		if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() {
+			continue
+		}
+
+		if !sectionwritten {
+			fmt.Fprintf(writer, "[%s]\n", sname)
+			sectionwritten = true
+		}
+
+		if comments && len(option.Description) != 0 {
+			fmt.Fprintf(writer, "; %s\n", option.Description)
+		}
+
+		oname := optionIniName(option)
+
+		commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault()
+
+		kind := val.Type().Kind()
+		switch kind {
+		case reflect.Slice:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				for idx := 0; idx < val.Len(); idx++ {
+					v, _ := convertToString(val.Index(idx), option.tag)
+
+					writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+				}
+			}
+		case reflect.Map:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				mkeys := val.MapKeys()
+				keys := make([]string, len(val.MapKeys()))
+				kkmap := make(map[string]reflect.Value)
+
+				for i, k := range mkeys {
+					keys[i], _ = convertToString(k, option.tag)
+					kkmap[keys[i]] = k
+				}
+
+				sort.Strings(keys)
+
+				for _, k := range keys {
+					v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag)
+
+					writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote)
+				}
+			}
+		default:
+			v, _ := convertToString(val, option.tag)
+
+			writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+		}
+
+		if comments {
+			fmt.Fprintln(writer)
+		}
+	}
+
+	if sectionwritten && !comments {
+		fmt.Fprintln(writer)
+	}
+}
+
+func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) {
+	if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) {
+		optionValue = strconv.Quote(optionValue)
+	}
+
+	comment := ""
+	if commentOption {
+		comment = "; "
+	}
+
+	fmt.Fprintf(writer, "%s%s =", comment, optionName)
+
+	if optionKey != "" {
+		fmt.Fprintf(writer, " %s:%s", optionKey, optionValue)
+	} else if optionValue != "" {
+		fmt.Fprintf(writer, " %s", optionValue)
+	}
+
+	fmt.Fprintln(writer)
+}
+
+func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) {
+	command.eachGroup(func(group *Group) {
+		if !group.Hidden {
+			writeGroupIni(command, group, namespace, writer, options)
+		}
+	})
+
+	for _, c := range command.commands {
+		var nns string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(namespace) != 0 {
+			nns = c.Name + "." + nns
+		} else {
+			nns = c.Name
+		}
+
+		writeCommandIni(c, nns, writer, options)
+	}
+}
+
+func writeIni(parser *IniParser, writer io.Writer, options IniOptions) {
+	writeCommandIni(parser.parser.Command, "", writer, options)
+}
+
+func writeIniToFile(parser *IniParser, filename string, options IniOptions) error {
+	file, err := os.Create(filename)
+
+	if err != nil {
+		return err
+	}
+
+	defer file.Close()
+
+	writeIni(parser, file, options)
+
+	return nil
+}
+
+func readIniFromFile(filename string) (*ini, error) {
+	file, err := os.Open(filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer file.Close()
+
+	return readIni(file, filename)
+}
+
+func readIni(contents io.Reader, filename string) (*ini, error) {
+	ret := &ini{
+		File:     filename,
+		Sections: make(map[string]iniSection),
+	}
+
+	reader := bufio.NewReader(contents)
+
+	// Empty global section
+	section := make(iniSection, 0, 10)
+	sectionname := ""
+
+	ret.Sections[sectionname] = section
+
+	var lineno uint
+
+	for {
+		line, err := readFullLine(reader)
+
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+
+		lineno++
+		line = strings.TrimSpace(line)
+
+		// Skip empty lines and comment lines starting with ';' or '#'
+		if len(line) == 0 || line[0] == ';' || line[0] == '#' {
+			continue
+		}
+
+		if line[0] == '[' {
+			if line[0] != '[' || line[len(line)-1] != ']' {
+				return nil, &IniError{
+					Message:    "malformed section header",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			name := strings.TrimSpace(line[1 : len(line)-1])
+
+			if len(name) == 0 {
+				return nil, &IniError{
+					Message:    "empty section name",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			sectionname = name
+			section = ret.Sections[name]
+
+			if section == nil {
+				section = make(iniSection, 0, 10)
+				ret.Sections[name] = section
+			}
+
+			continue
+		}
+
+		// Parse option here
+		keyval := strings.SplitN(line, "=", 2)
+
+		if len(keyval) != 2 {
+			return nil, &IniError{
+				Message:    fmt.Sprintf("malformed key=value (%s)", line),
+				File:       filename,
+				LineNumber: lineno,
+			}
+		}
+
+		name := strings.TrimSpace(keyval[0])
+		value := strings.TrimSpace(keyval[1])
+		quoted := false
+
+		if len(value) != 0 && value[0] == '"' {
+			if v, err := strconv.Unquote(value); err == nil {
+				value = v
+
+				quoted = true
+			} else {
+				return nil, &IniError{
+					Message:    err.Error(),
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+		}
+
+		section = append(section, iniValue{
+			Name:       name,
+			Value:      value,
+			Quoted:     quoted,
+			LineNumber: lineno,
+		})
+
+		ret.Sections[sectionname] = section
+	}
+
+	return ret, nil
+}
+
+func (i *IniParser) matchingGroups(name string) []*Group {
+	if len(name) == 0 {
+		var ret []*Group
+
+		i.parser.eachGroup(func(g *Group) {
+			ret = append(ret, g)
+		})
+
+		return ret
+	}
+
+	g := i.parser.groupByName(name)
+
+	if g != nil {
+		return []*Group{g}
+	}
+
+	return nil
+}
+
+func (i *IniParser) parse(ini *ini) error {
+	p := i.parser
+
+	var quotesLookup = make(map[*Option]bool)
+
+	for name, section := range ini.Sections {
+		groups := i.matchingGroups(name)
+
+		if len(groups) == 0 {
+			return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+		}
+
+		for _, inival := range section {
+			var opt *Option
+
+			for _, group := range groups {
+				opt = group.optionByName(inival.Name, func(o *Option, n string) bool {
+					return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n)
+				})
+
+				if opt != nil && len(opt.tag.Get("no-ini")) != 0 {
+					opt = nil
+				}
+
+				if opt != nil {
+					break
+				}
+			}
+
+			if opt == nil {
+				if (p.Options & IgnoreUnknown) == None {
+					return &IniError{
+						Message:    fmt.Sprintf("unknown option: %s", inival.Name),
+						File:       ini.File,
+						LineNumber: inival.LineNumber,
+					}
+				}
+
+				continue
+			}
+
+			// The ini value is ignored when ParseAsDefaults is set and the
+			// value was previously set from something other than a default
+			if i.ParseAsDefaults && !opt.isSetDefault {
+				continue
+			}
+
+			pval := &inival.Value
+
+			if !opt.canArgument() && len(inival.Value) == 0 {
+				pval = nil
+			} else {
+				if opt.value.Type().Kind() == reflect.Map {
+					parts := strings.SplitN(inival.Value, ":", 2)
+
+					// only handle unquoting
+					if len(parts) == 2 && parts[1][0] == '"' {
+						if v, err := strconv.Unquote(parts[1]); err == nil {
+							parts[1] = v
+
+							inival.Quoted = true
+						} else {
+							return &IniError{
+								Message:    err.Error(),
+								File:       ini.File,
+								LineNumber: inival.LineNumber,
+							}
+						}
+
+						s := parts[0] + ":" + parts[1]
+
+						pval = &s
+					}
+				}
+			}
+
+			if err := opt.set(pval); err != nil {
+				return &IniError{
+					Message:    err.Error(),
+					File:       ini.File,
+					LineNumber: inival.LineNumber,
+				}
+			}
+
+			// either all INI values are quoted or only values that need quoting
+			if _, ok := quotesLookup[opt]; !inival.Quoted || !ok {
+				quotesLookup[opt] = inival.Quoted
+			}
+
+			opt.tag.Set("_read-ini-name", inival.Name)
+		}
+	}
+
+	for opt, quoted := range quotesLookup {
+		opt.iniQuote = quoted
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go
new file mode 100644
index 0000000..0cb114e
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/man.go
@@ -0,0 +1,205 @@
+package flags
+
+import (
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+)
+
+func manQuote(s string) string {
+	return strings.Replace(s, "\\", "\\\\", -1)
+}
+
+func formatForMan(wr io.Writer, s string) {
+	for {
+		idx := strings.IndexRune(s, '`')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", manQuote(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "%s", manQuote(s[:idx]))
+
+		s = s[idx+1:]
+		idx = strings.IndexRune(s, '\'')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", manQuote(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx]))
+		s = s[idx+1:]
+	}
+}
+
+func writeManPageOptions(wr io.Writer, grp *Group) {
+	grp.eachGroup(func(group *Group) {
+		if group.Hidden || len(group.options) == 0 {
+			return
+		}
+
+		// If the parent (grp) has any subgroups, display their descriptions as
+		// subsection headers similar to the output of --help.
+		if group.ShortDescription != "" && len(grp.groups) > 0 {
+			fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription)
+
+			if group.LongDescription != "" {
+				formatForMan(wr, group.LongDescription)
+				fmt.Fprintln(wr, "")
+			}
+		}
+
+		for _, opt := range group.options {
+			if !opt.canCli() || opt.Hidden {
+				continue
+			}
+
+			fmt.Fprintln(wr, ".TP")
+			fmt.Fprintf(wr, "\\fB")
+
+			if opt.ShortName != 0 {
+				fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName)
+			}
+
+			if len(opt.LongName) != 0 {
+				if opt.ShortName != 0 {
+					fmt.Fprintf(wr, ", ")
+				}
+
+				fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace()))
+			}
+
+			if len(opt.ValueName) != 0 || opt.OptionalArgument {
+				if opt.OptionalArgument {
+					fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", ")))
+				} else {
+					fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName))
+				}
+			}
+
+			if len(opt.Default) != 0 {
+				fmt.Fprintf(wr, " <default: \\fI%s\\fR>", manQuote(strings.Join(quoteV(opt.Default), ", ")))
+			} else if len(opt.EnvDefaultKey) != 0 {
+				if runtime.GOOS == "windows" {
+					fmt.Fprintf(wr, " <default: \\fI%%%s%%\\fR>", manQuote(opt.EnvDefaultKey))
+				} else {
+					fmt.Fprintf(wr, " <default: \\fI$%s\\fR>", manQuote(opt.EnvDefaultKey))
+				}
+			}
+
+			if opt.Required {
+				fmt.Fprintf(wr, " (\\fIrequired\\fR)")
+			}
+
+			fmt.Fprintln(wr, "\\fP")
+
+			if len(opt.Description) != 0 {
+				formatForMan(wr, opt.Description)
+				fmt.Fprintln(wr, "")
+			}
+		}
+	})
+}
+
+func writeManPageSubcommands(wr io.Writer, name string, root *Command) {
+	commands := root.sortedVisibleCommands()
+
+	for _, c := range commands {
+		var nn string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(name) != 0 {
+			nn = name + " " + c.Name
+		} else {
+			nn = c.Name
+		}
+
+		writeManPageCommand(wr, nn, root, c)
+	}
+}
+
+func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) {
+	fmt.Fprintf(wr, ".SS %s\n", name)
+	fmt.Fprintln(wr, command.ShortDescription)
+
+	if len(command.LongDescription) > 0 {
+		fmt.Fprintln(wr, "")
+
+		cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name))
+
+		if strings.HasPrefix(command.LongDescription, cmdstart) {
+			fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name))
+
+			formatForMan(wr, command.LongDescription[len(cmdstart):])
+			fmt.Fprintln(wr, "")
+		} else {
+			formatForMan(wr, command.LongDescription)
+			fmt.Fprintln(wr, "")
+		}
+	}
+
+	var usage string
+	if us, ok := command.data.(Usage); ok {
+		usage = us.Usage()
+	} else if command.hasCliOptions() {
+		usage = fmt.Sprintf("[%s-OPTIONS]", command.Name)
+	}
+
+	var pre string
+	if root.hasCliOptions() {
+		pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name)
+	} else {
+		pre = fmt.Sprintf("%s %s", root.Name, command.Name)
+	}
+
+	if len(usage) > 0 {
+		fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage))
+	}
+
+	if len(command.Aliases) > 0 {
+		fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", ")))
+	}
+
+	writeManPageOptions(wr, command.Group)
+	writeManPageSubcommands(wr, name, command)
+}
+
+// WriteManPage writes a basic man page in groff format to the specified
+// writer.
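+//
+// An illustrative sketch, assuming a parser built with flags.NewParser and a
+// hypothetical output file name:
+//
+//     f, err := os.Create("app.1")
+//     if err == nil {
+//         defer f.Close()
+//         parser.WriteManPage(f)
+//     }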
+func (p *Parser) WriteManPage(wr io.Writer) {
+	t := time.Now()
+
+	fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006"))
+	fmt.Fprintln(wr, ".SH NAME")
+	fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription))
+	fmt.Fprintln(wr, ".SH SYNOPSIS")
+
+	usage := p.Usage
+
+	if len(usage) == 0 {
+		usage = "[OPTIONS]"
+	}
+
+	fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage))
+	fmt.Fprintln(wr, ".SH DESCRIPTION")
+
+	formatForMan(wr, p.LongDescription)
+	fmt.Fprintln(wr, "")
+
+	fmt.Fprintln(wr, ".SH OPTIONS")
+
+	writeManPageOptions(wr, p.Command.Group)
+
+	if len(p.visibleCommands()) > 0 {
+		fmt.Fprintln(wr, ".SH COMMANDS")
+
+		writeManPageSubcommands(wr, "", p.Command)
+	}
+}
diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go
new file mode 100644
index 0000000..96bb1a3
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/multitag.go
@@ -0,0 +1,140 @@
+package flags
+
+import (
+	"strconv"
+)
+
+type multiTag struct {
+	value string
+	cache map[string][]string
+}
+
+func newMultiTag(v string) multiTag {
+	return multiTag{
+		value: v,
+	}
+}
+
+func (x *multiTag) scan() (map[string][]string, error) {
+	v := x.value
+
+	ret := make(map[string][]string)
+
+	// This is mostly copied from reflect.StructTag.Get
+	for v != "" {
+		i := 0
+
+		// Skip whitespace
+		for i < len(v) && v[i] == ' ' {
+			i++
+		}
+
+		v = v[i:]
+
+		if v == "" {
+			break
+		}
+
+		// Scan to colon to find key
+		i = 0
+
+		for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' {
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value)
+		}
+
+		if v[i] != ':' {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value)
+		}
+
+		if i+1 >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value)
+		}
+
+		if v[i+1] != '"' {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value)
+		}
+
+		name := v[:i]
+		v = v[i+1:]
+
+		// Scan quoted string to find value
+		i = 1
+
+		for i < len(v) && v[i] != '"' {
+			if v[i] == '\n' {
+				return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value)
+			}
+
+			if v[i] == '\\' {
+				i++
+			}
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value)
+		}
+
+		val, err := strconv.Unquote(v[:i+1])
+
+		if err != nil {
+			return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value)
+		}
+
+		v = v[i+1:]
+
+		ret[name] = append(ret[name], val)
+	}
+
+	return ret, nil
+}
+
+func (x *multiTag) Parse() error {
+	vals, err := x.scan()
+	x.cache = vals
+
+	return err
+}
+
+func (x *multiTag) cached() map[string][]string {
+	if x.cache == nil {
+		cache, _ := x.scan()
+
+		if cache == nil {
+			cache = make(map[string][]string)
+		}
+
+		x.cache = cache
+	}
+
+	return x.cache
+}
+
+func (x *multiTag) Get(key string) string {
+	c := x.cached()
+
+	if v, ok := c[key]; ok {
+		return v[len(v)-1]
+	}
+
+	return ""
+}
+
+func (x *multiTag) GetMany(key string) []string {
+	c := x.cached()
+	return c[key]
+}
+
+func (x *multiTag) Set(key string, value string) {
+	c := x.cached()
+	c[key] = []string{value}
+}
+
+func (x *multiTag) SetMany(key string, value []string) {
+	c := x.cached()
+	c[key] = value
+}
diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go
new file mode 100644
index 0000000..5f85250
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/option.go
@@ -0,0 +1,459 @@
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// Option flag information. Contains a description of the option, short and
+// long name as well as a default value and whether an argument for this
+// flag is optional.
+type Option struct {
+	// The description of the option flag. This description is shown
+	// automatically in the built-in help.
+	Description string
+
+	// The short name of the option (a single character). If not 0, the
+	// option flag can be 'activated' using -<ShortName>. Either ShortName
+	// or LongName needs to be non-empty.
+	ShortName rune
+
+	// The long name of the option. If not "", the option flag can be
+	// activated using --<LongName>. Either ShortName or LongName needs
+	// to be non-empty.
+	LongName string
+
+	// The default value of the option.
+	Default []string
+
+	// The optional environment default value key name.
+	EnvDefaultKey string
+
+	// The optional delimiter string for EnvDefaultKey values.
+	EnvDefaultDelim string
+
+	// If true, specifies that the argument to an option flag is optional.
+	// When no argument to the flag is specified on the command line, the
+	// value of OptionalValue will be set in the field this option represents.
+	// This is only valid for non-boolean options.
+	OptionalArgument bool
+
+	// The optional value of the option. The optional value is used when
+	// the option flag is marked as having an OptionalArgument. This means
+	// that when the flag is specified, but no option argument is given,
+	// the value of the field this option represents will be set to
+	// OptionalValue. This is only valid for non-boolean options.
+	OptionalValue []string
+
+	// If true, the option _must_ be specified on the command line. If the
+	// option is not specified, the parser will generate an ErrRequired type
+	// error.
+	Required bool
+
+	// A name for the value of an option shown in the Help as --flag [ValueName]
+	ValueName string
+
+	// A mask value to show in the help instead of the default value. This
+	// is useful for hiding sensitive information in the help, such as
+	// passwords.
+	DefaultMask string
+
+	// If non-empty, only a certain set of values is allowed for an option.
+	Choices []string
+
+	// If true, the option is not displayed in the help or man page
+	Hidden bool
+
+	// The group which the option belongs to
+	group *Group
+
+	// The struct field which the option represents.
+	field reflect.StructField
+
+	// The struct field value which the option represents.
+	value reflect.Value
+
+	// Determines if the option will always be quoted in the INI output
+	iniQuote bool
+
+	tag            multiTag
+	isSet          bool
+	isSetDefault   bool
+	preventDefault bool
+
+	defaultLiteral string
+}
+
+// LongNameWithNamespace returns the option's long name with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the long name
+// itself are separated by the parser's namespace delimiter. If the long name is
+// empty, an empty string is returned.
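+//
+// For illustration (names chosen arbitrarily): with a group Namespace of
+// "server", the default "." namespace delimiter and a LongName of "port",
+// this returns:
+//
+//     opt.LongNameWithNamespace() // "server.port"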
+func (option *Option) LongNameWithNamespace() string {
+	if len(option.LongName) == 0 {
+		return ""
+	}
+
+	// fetch the namespace delimiter from the parser which is always at the
+	// end of the group hierarchy
+	namespaceDelimiter := ""
+	g := option.group
+
+	for {
+		if p, ok := g.parent.(*Parser); ok {
+			namespaceDelimiter = p.NamespaceDelimiter
+
+			break
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		}
+	}
+
+	// concatenate long name with namespace
+	longName := option.LongName
+	g = option.group
+
+	for g != nil {
+		if g.Namespace != "" {
+			longName = g.Namespace + namespaceDelimiter + longName
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		case *Parser:
+			g = nil
+		}
+	}
+
+	return longName
+}
+
+// String converts an option to a human friendly readable string describing the
+// option.
+func (option *Option) String() string {
+	var s string
+	var short string
+
+	if option.ShortName != 0 {
+		data := make([]byte, utf8.RuneLen(option.ShortName))
+		utf8.EncodeRune(data, option.ShortName)
+		short = string(data)
+
+		if len(option.LongName) != 0 {
+			s = fmt.Sprintf("%s%s, %s%s",
+				string(defaultShortOptDelimiter), short,
+				defaultLongOptDelimiter, option.LongNameWithNamespace())
+		} else {
+			s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short)
+		}
+	} else if len(option.LongName) != 0 {
+		s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace())
+	}
+
+	return s
+}
+
+// Value returns the option value as an interface{}.
+func (option *Option) Value() interface{} {
+	return option.value.Interface()
+}
+
+// Field returns the reflect struct field of the option.
+func (option *Option) Field() reflect.StructField {
+	return option.field
+}
+
+// IsSet returns true if the option has been set
+func (option *Option) IsSet() bool {
+	return option.isSet
+}
+
+// IsSetDefault returns true if the option has been set via the default option tag
+func (option *Option) IsSetDefault() bool {
+	return option.isSetDefault
+}
+
+// Set the value of an option to the specified value. An error will be returned
+// if the specified value could not be converted to the corresponding option
+// value type.
+func (option *Option) set(value *string) error {
+	kind := option.value.Type().Kind()
+
+	if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet {
+		option.empty()
+	}
+
+	option.isSet = true
+	option.preventDefault = true
+
+	if len(option.Choices) != 0 {
+		found := false
+
+		for _, choice := range option.Choices {
+			if choice == *value {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ")
+
+			if len(option.Choices) > 1 {
+				allowed += " or " + option.Choices[len(option.Choices)-1]
+			}
+
+			return newErrorf(ErrInvalidChoice,
+				"Invalid value `%s' for option `%s'. Allowed values are: %s",
+				*value, option, allowed)
+		}
+	}
+
+	if option.isFunc() {
+		return option.call(value)
+	} else if value != nil {
+		return convert(*value, option.value, option.tag)
+	}
+
+	return convert("", option.value, option.tag)
+}
+
+func (option *Option) canCli() bool {
+	return option.ShortName != 0 || len(option.LongName) != 0
+}
+
+func (option *Option) canArgument() bool {
+	if u := option.isUnmarshaler(); u != nil {
+		return true
+	}
+
+	return !option.isBool()
+}
+
+func (option *Option) emptyValue() reflect.Value {
+	tp := option.value.Type()
+
+	if tp.Kind() == reflect.Map {
+		return reflect.MakeMap(tp)
+	}
+
+	return reflect.Zero(tp)
+}
+
+func (option *Option) empty() {
+	if !option.isFunc() {
+		option.value.Set(option.emptyValue())
+	}
+}
+
+func (option *Option) clearDefault() {
+	usedDefault := option.Default
+
+	if envKey := option.EnvDefaultKey; envKey != "" {
+		if value, ok := os.LookupEnv(envKey); ok {
+			if option.EnvDefaultDelim != "" {
+				usedDefault = strings.Split(value,
+					option.EnvDefaultDelim)
+			} else {
+				usedDefault = []string{value}
+			}
+		}
+	}
+
+	option.isSetDefault = true
+
+	if len(usedDefault) > 0 {
+		option.empty()
+
+		for _, d := range usedDefault {
+			option.set(&d)
+			option.isSetDefault = true
+		}
+	} else {
+		tp := option.value.Type()
+
+		switch tp.Kind() {
+		case reflect.Map:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		case reflect.Slice:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		}
+	}
+}
+
+func (option *Option) valueIsDefault() bool {
+	// Check if the value of the option corresponds to its
+	// default value
+	emptyval := option.emptyValue()
+
+	checkvalptr := reflect.New(emptyval.Type())
+	checkval := reflect.Indirect(checkvalptr)
+
+	checkval.Set(emptyval)
+
+	if len(option.Default) != 0 {
+		for _, v := range option.Default {
+			convert(v, checkval, option.tag)
+		}
+	}
+
+	return reflect.DeepEqual(option.value.Interface(), checkval.Interface())
+}
+
+func (option *Option) isUnmarshaler() Unmarshaler {
+	v := option.value
+
+	for {
+		if !v.CanInterface() {
+			break
+		}
+
+		i := v.Interface()
+
+		if u, ok := i.(Unmarshaler); ok {
+			return u
+		}
+
+		if !v.CanAddr() {
+			break
+		}
+
+		v = v.Addr()
+	}
+
+	return nil
+}
+
+func (option *Option) isBool() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Bool:
+			return true
+		case reflect.Func:
+			return tp.NumIn() == 0
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isSignedNumber() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64:
+			return true
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isFunc() bool {
+	return option.value.Type().Kind() == reflect.Func
+}
+
+func (option *Option) call(value *string) error {
+	var retval []reflect.Value
+
+	if value == nil {
+		retval = option.value.Call(nil)
+	} else {
+		tp := option.value.Type().In(0)
+
+		val := reflect.New(tp)
+		val = reflect.Indirect(val)
+
+		if err := convert(*value, val, option.tag); err != nil {
+			return err
+		}
+
+		retval = option.value.Call([]reflect.Value{val})
+	}
+
+	if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() {
+		if retval[0].Interface() == nil {
+			return nil
+		}
+
+		return retval[0].Interface().(error)
+	}
+
+	return nil
+}
+
+func (option *Option) updateDefaultLiteral() {
+	defs := option.Default
+	def := ""
+
+	if len(defs) == 0 && option.canArgument() {
+		var showdef bool
+
+		switch option.field.Type.Kind() {
+		case reflect.Func, reflect.Ptr:
+			showdef = !option.value.IsNil()
+		case reflect.Slice, reflect.String, reflect.Array:
+			showdef = option.value.Len() > 0
+		case reflect.Map:
+			showdef = !option.value.IsNil() && option.value.Len() > 0
+		default:
+			zeroval := reflect.Zero(option.field.Type)
+			showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface())
+		}
+
+		if showdef {
+			def, _ = convertToString(option.value, option.tag)
+		}
+	} else if len(defs) != 0 {
+		l := len(defs) - 1
+
+		for i := 0; i < l; i++ {
+			def += quoteIfNeeded(defs[i]) + ", "
+		}
+
+		def += quoteIfNeeded(defs[l])
+	}
+
+	option.defaultLiteral = def
+}
+
+func (option *Option) shortAndLongName() string {
+	ret := &bytes.Buffer{}
+
+	if option.ShortName != 0 {
+		ret.WriteRune(defaultShortOptDelimiter)
+		ret.WriteRune(option.ShortName)
+	}
+
+	if len(option.LongName) != 0 {
+		if option.ShortName != 0 {
+			ret.WriteRune('/')
+		}
+
+		ret.WriteString(option.LongName)
+	}
+
+	return ret.String()
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
new file mode 100644
index 0000000..56dfdae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
@@ -0,0 +1,67 @@
+// +build !windows forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+const (
+	defaultShortOptDelimiter = '-'
+	defaultLongOptDelimiter  = "--"
+	defaultNameArgDelimiter  = '='
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && arg[0] == '-'
+}
+
+func argumentIsOption(arg string) bool {
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option name without its prefix and whether
+// the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	if strings.HasPrefix(optname, "--") {
+		return "--", optname[2:], true
+	} else if strings.HasPrefix(optname, "-") {
+		return "-", optname[1:], false
+	}
+
+	return "", optname, false
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	pos := strings.Index(option, "=")
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], "=", &rest
+	}
+
+	return option, "", nil
+}
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	var help struct {
+		ShowHelp func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelp = showHelp
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
new file mode 100644
index 0000000..f3f28ae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
@@ -0,0 +1,108 @@
+// +build !forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+// Windows uses a forward slash for both short and long options. It also uses
+// a colon as the name/argument delimiter.
+const (
+	defaultShortOptDelimiter = '/'
+	defaultLongOptDelimiter  = "/"
+	defaultNameArgDelimiter  = ':'
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/')
+}
+
+func argumentIsOption(arg string) bool {
+	// Windows-style options allow front slash for the option
+	// delimiter.
+	if len(arg) > 1 && arg[0] == '/' {
+		return true
+	}
+
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option name without its prefix and whether
+// the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	// Determine if the argument is a long option or not.  Windows
+	// typically supports both long and short options with a single
+	// front slash as the option delimiter, so handle this situation
+	// nicely.
+	possplit := 0
+
+	if strings.HasPrefix(optname, "--") {
+		possplit = 2
+		islong = true
+	} else if strings.HasPrefix(optname, "-") {
+		possplit = 1
+		islong = false
+	} else if strings.HasPrefix(optname, "/") {
+		possplit = 1
+		islong = len(optname) > 2
+	}
+
+	return optname[:possplit], optname[possplit:], islong
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	if len(option) == 0 {
+		return option, "", nil
+	}
+
+	// Windows typically uses a colon for the option name and argument
+	// delimiter while POSIX typically uses an equals.  Support both styles,
+	// but don't allow the two to be mixed.  That is to say /foo:bar and
+	// --foo=bar are acceptable, but /foo=bar and --foo:bar are not.
+	var pos int
+	var sp string
+
+	if prefix == "/" {
+		sp = ":"
+		pos = strings.Index(option, sp)
+	} else if len(prefix) > 0 {
+		sp = "="
+		pos = strings.Index(option, sp)
+	}
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], sp, &rest
+	}
+
+	return option, "", nil
+}
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	// Windows CLI applications typically use /? for help, so make that
+	// available as well as the POSIX-style -h and --help.
+	var help struct {
+		ShowHelpWindows func() error `short:"?" description:"Show this help message"`
+		ShowHelpPosix   func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelpWindows = showHelp
+	help.ShowHelpPosix = showHelp
+
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go
new file mode 100644
index 0000000..0a7922a
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/parser.go
@@ -0,0 +1,700 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// A Parser provides command line option parsing. It can contain several
+// option groups each with their own set of options.
+type Parser struct {
+	// Embedded, see Command for more information
+	*Command
+
+	// A usage string to be displayed in the help message.
+	Usage string
+
+	// Option flags changing the behavior of the parser.
+	Options Options
+
+	// NamespaceDelimiter separates group namespaces and option long names
+	NamespaceDelimiter string
+
+	// UnknownOptionHandler is a function which gets called when the parser
+	// encounters an unknown option. The function receives the unknown option
+	// name, a SplitArgument which specifies its value if set with an argument
+	// separator, and the remaining command line arguments.
+	// It should return a new list of remaining arguments to continue parsing,
+	// or an error to indicate a parse failure.
+	UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error)
+
+	// CompletionHandler is a function that gets called to handle the completion of
+	// items. By default, the items are printed and the application is exited.
+	// You can override this default behavior by specifying a custom CompletionHandler.
+	CompletionHandler func(items []Completion)
+
+	// CommandHandler is a function that gets called to handle execution of a
+	// command. By default, the command will simply be executed. This can be
+	// overridden to perform certain actions (such as applying global flags)
+	// just before the command is executed. Note that if you override the
+	// handler it is your responsibility to call the command.Execute function.
+	//
+	// The command passed into CommandHandler may be nil in case there is no
+	// command to be executed when parsing has finished.
+	CommandHandler func(command Commander, args []string) error
+
+	internalError error
+}
+
+// SplitArgument represents the argument value of an option that was passed using
+// an argument separator.
+type SplitArgument interface {
+	// Value returns the option's value as a string, and a boolean indicating
+	// if the option was present.
+	Value() (string, bool)
+}
+
+type strArgument struct {
+	value *string
+}
+
+func (s strArgument) Value() (string, bool) {
+	if s.value == nil {
+		return "", false
+	}
+
+	return *s.value, true
+}
+
+// Options provides parser options that change the behavior of the option
+// parser.
+type Options uint
+
+const (
+	// None indicates no options.
+	None Options = 0
+
+	// HelpFlag adds a default Help Options group to the parser containing
+	// -h and --help options. When either -h or --help is specified on the
+	// command line, the parser will return the special error of type
+	// ErrHelp. When PrintErrors is also specified, then the help message
+	// will also be automatically printed to os.Stdout.
+	HelpFlag = 1 << iota
+
+	// PassDoubleDash passes all arguments after a double dash, --, as
+	// remaining command line arguments (i.e. they will not be parsed for
+	// flags).
+	PassDoubleDash
+
+	// IgnoreUnknown ignores any unknown options and passes them as
+	// remaining command line arguments instead of generating an error.
+	IgnoreUnknown
+
+	// PrintErrors prints any errors which occurred during parsing to
+	// os.Stderr. In the special case of ErrHelp, the message will be printed
+	// to os.Stdout.
+	PrintErrors
+
+	// PassAfterNonOption passes all arguments after the first non option
+	// as remaining command line arguments. This is equivalent to strict
+	// POSIX processing.
+	PassAfterNonOption
+
+	// Default is a convenient default set of options which should cover
+	// most of the uses of the flags package.
+	Default = HelpFlag | PrintErrors | PassDoubleDash
+)
+
+type parseState struct {
+	arg        string
+	args       []string
+	retargs    []string
+	positional []*Arg
+	err        error
+
+	command *Command
+	lookup  lookup
+}
+
+// Parse is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). For more control, use
+// flags.NewParser.
+func Parse(data interface{}) ([]string, error) {
+	return NewParser(data, Default).Parse()
+}
+
+// ParseArgs is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). The args argument is
+// the list of command line arguments to parse. If you just want to parse the
+// default program command line arguments (i.e. os.Args), then use flags.Parse
+// instead. For more control, use flags.NewParser.
+func ParseArgs(data interface{}, args []string) ([]string, error) {
+	return NewParser(data, Default).ParseArgs(args)
+}
+
+// NewParser creates a new parser. It uses os.Args[0] as the application
+// name and then calls NewNamedParser (see NewNamedParser for
+// more details). The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"), or nil if the default
+// group should not be added. The options parameter specifies a set of options
+// for the parser.
+func NewParser(data interface{}, options Options) *Parser {
+	p := NewNamedParser(path.Base(os.Args[0]), options)
+
+	if data != nil {
+		g, err := p.AddGroup("Application Options", "", data)
+
+		if err == nil {
+			g.parent = p
+		}
+
+		p.internalError = err
+	}
+
+	return p
+}
+
+// NewNamedParser creates a new parser. The appname is used to display the
+// executable name in the built-in help message. Option groups and commands can
+// be added to this parser by using AddGroup and AddCommand.
+func NewNamedParser(appname string, options Options) *Parser {
+	p := &Parser{
+		Command:            newCommand(appname, "", "", nil),
+		Options:            options,
+		NamespaceDelimiter: ".",
+	}
+
+	p.Command.parent = p
+
+	return p
+}
+
+// Parse parses the command line arguments from os.Args using Parser.ParseArgs.
+// For more detailed information see ParseArgs.
+func (p *Parser) Parse() ([]string, error) {
+	return p.ParseArgs(os.Args[1:])
+}
+
+// ParseArgs parses the command line arguments according to the option groups that
+// were added to the parser. On successful parsing of the arguments, the
+// remaining, non-option, arguments (if any) are returned. The returned error
+// indicates a parsing error and can be used with PrintError to display
+// contextual information on where the error occurred exactly.
+//
+// When the common help group has been added (AddHelp) and either -h or --help
+// was specified in the command line arguments, a help message will be
+// automatically printed if the PrintErrors option is enabled.
+// Furthermore, the special error type ErrHelp is returned.
+// It is up to the caller to exit the program if so desired.
+func (p *Parser) ParseArgs(args []string) ([]string, error) {
+	if p.internalError != nil {
+		return nil, p.internalError
+	}
+
+	p.eachOption(func(c *Command, g *Group, option *Option) {
+		option.isSet = false
+		option.isSetDefault = false
+		option.updateDefaultLiteral()
+	})
+
+	// Add built-in help group to all commands if necessary
+	if (p.Options & HelpFlag) != None {
+		p.addHelpGroups(p.showBuiltinHelp)
+	}
+
+	compval := os.Getenv("GO_FLAGS_COMPLETION")
+
+	if len(compval) != 0 {
+		comp := &completion{parser: p}
+		items := comp.complete(args)
+
+		if p.CompletionHandler != nil {
+			p.CompletionHandler(items)
+		} else {
+			comp.print(items, compval == "verbose")
+			os.Exit(0)
+		}
+
+		return nil, nil
+	}
+
+	s := &parseState{
+		args:    args,
+		retargs: make([]string, 0, len(args)),
+	}
+
+	p.fillParseState(s)
+
+	for !s.eof() {
+		arg := s.pop()
+
+		// When PassDoubleDash is set and we encounter a --, then
+		// simply append all the rest as arguments and break out
+		if (p.Options&PassDoubleDash) != None && arg == "--" {
+			s.addArgs(s.args...)
+			break
+		}
+
+		if !argumentIsOption(arg) {
+			// Note: this also sets s.err, so we can just check for
+			// nil here and use s.err later
+			if p.parseNonOption(s) != nil {
+				break
+			}
+
+			continue
+		}
+
+		var err error
+
+		prefix, optname, islong := stripOptionPrefix(arg)
+		optname, _, argument := splitOption(prefix, optname, islong)
+
+		if islong {
+			err = p.parseLong(s, optname, argument)
+		} else {
+			err = p.parseShort(s, optname, argument)
+		}
+
+		if err != nil {
+			ignoreUnknown := (p.Options & IgnoreUnknown) != None
+			parseErr := wrapError(err)
+
+			if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) {
+				s.err = parseErr
+				break
+			}
+
+			if ignoreUnknown {
+				s.addArgs(arg)
+			} else if p.UnknownOptionHandler != nil {
+				modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args)
+
+				if err != nil {
+					s.err = err
+					break
+				}
+
+				s.args = modifiedArgs
+			}
+		}
+	}
+
+	if s.err == nil {
+		p.eachOption(func(c *Command, g *Group, option *Option) {
+			if option.preventDefault {
+				return
+			}
+
+			option.clearDefault()
+		})
+
+		s.checkRequired(p)
+	}
+
+	var reterr error
+
+	if s.err != nil {
+		reterr = s.err
+	} else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional {
+		reterr = s.estimateCommand()
+	} else if cmd, ok := s.command.data.(Commander); ok {
+		if p.CommandHandler != nil {
+			reterr = p.CommandHandler(cmd, s.retargs)
+		} else {
+			reterr = cmd.Execute(s.retargs)
+		}
+	} else if p.CommandHandler != nil {
+		reterr = p.CommandHandler(nil, s.retargs)
+	}
+
+	if reterr != nil {
+		var retargs []string
+
+		if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp {
+			retargs = append([]string{s.arg}, s.args...)
+		} else {
+			retargs = s.args
+		}
+
+		return retargs, p.printError(reterr)
+	}
+
+	return s.retargs, nil
+}
+
+func (p *parseState) eof() bool {
+	return len(p.args) == 0
+}
+
+func (p *parseState) pop() string {
+	if p.eof() {
+		return ""
+	}
+
+	p.arg = p.args[0]
+	p.args = p.args[1:]
+
+	return p.arg
+}
+
+func (p *parseState) peek() string {
+	if p.eof() {
+		return ""
+	}
+
+	return p.args[0]
+}
+
+func (p *parseState) checkRequired(parser *Parser) error {
+	c := parser.Command
+
+	var required []*Option
+
+	for c != nil {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				if !option.isSet && option.Required {
+					required = append(required, option)
+				}
+			}
+		})
+
+		c = c.Active
+	}
+
+	if len(required) == 0 {
+		if len(p.positional) > 0 {
+			var reqnames []string
+
+			for _, arg := range p.positional {
+				argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1
+
+				if !argRequired {
+					continue
+				}
+
+				if arg.isRemaining() {
+					if arg.value.Len() < arg.Required {
+						var arguments string
+
+						if arg.Required > 1 {
+							arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len())
+						} else {
+							arguments = "argument"
+						}
+
+						reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`")
+					} else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum {
+						if arg.RequiredMaximum == 0 {
+							reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`")
+						} else {
+							var arguments string
+
+							if arg.RequiredMaximum > 1 {
+								arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len())
+							} else {
+								arguments = "argument"
+							}
+
+							reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`")
+						}
+					}
+				} else {
+					reqnames = append(reqnames, "`"+arg.Name+"`")
+				}
+			}
+
+			if len(reqnames) == 0 {
+				return nil
+			}
+
+			var msg string
+
+			if len(reqnames) == 1 {
+				msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
+			} else {
+				msg = fmt.Sprintf("the required arguments %s and %s were not provided",
+					strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
+			}
+
+			p.err = newError(ErrRequired, msg)
+			return p.err
+		}
+
+		return nil
+	}
+
+	names := make([]string, 0, len(required))
+
+	for _, k := range required {
+		names = append(names, "`"+k.String()+"'")
+	}
+
+	sort.Strings(names)
+
+	var msg string
+
+	if len(names) == 1 {
+		msg = fmt.Sprintf("the required flag %s was not specified", names[0])
+	} else {
+		msg = fmt.Sprintf("the required flags %s and %s were not specified",
+			strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
+	}
+
+	p.err = newError(ErrRequired, msg)
+	return p.err
+}
+
+func (p *parseState) estimateCommand() error {
+	commands := p.command.sortedVisibleCommands()
+	cmdnames := make([]string, len(commands))
+
+	for i, v := range commands {
+		cmdnames[i] = v.Name
+	}
+
+	var msg string
+	var errtype ErrorType
+
+	if len(p.retargs) != 0 {
+		c, l := closestChoice(p.retargs[0], cmdnames)
+		msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
+		errtype = ErrUnknownCommand
+
+		if float32(l)/float32(len(c)) < 0.5 {
+			msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
+		} else if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("%s. You should use the %s command",
+				msg,
+				cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
+				msg,
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	} else {
+		errtype = ErrCommandRequired
+
+		if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("Please specify one command of: %s or %s",
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	}
+
+	return newError(errtype, msg)
+}
+
+func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
+	if !option.canArgument() {
+		if argument != nil {
+			return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option)
+		}
+
+		err = option.set(nil)
+	} else if argument != nil || (canarg && !s.eof()) {
+		var arg string
+
+		if argument != nil {
+			arg = *argument
+		} else {
+			arg = s.pop()
+
+			if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') {
+				return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg)
+			} else if p.Options&PassDoubleDash != 0 && arg == "--" {
+				return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option)
+			}
+		}
+
+		if option.tag.Get("unquote") != "false" {
+			arg, err = unquoteIfPossible(arg)
+		}
+
+		if err == nil {
+			err = option.set(&arg)
+		}
+	} else if option.OptionalArgument {
+		option.empty()
+
+		for _, v := range option.OptionalValue {
+			err = option.set(&v)
+
+			if err != nil {
+				break
+			}
+		}
+	} else {
+		err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option)
+	}
+
+	if err != nil {
+		if _, ok := err.(*Error); !ok {
+			err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s",
+				option,
+				option.value.Type(),
+				err.Error())
+		}
+	}
+
+	return err
+}
+
+func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
+	if option := s.lookup.longNames[name]; option != nil {
+		// Only long options whose argument is not optional can consume an
+		// argument from the argument list
+		canarg := !option.OptionalArgument
+
+		return p.parseOption(s, name, option, canarg, argument)
+	}
+
+	return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name)
+}
+
+func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
+	c, n := utf8.DecodeRuneInString(optname)
+
+	if n == len(optname) {
+		return optname, nil
+	}
+
+	first := string(c)
+
+	if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
+		arg := optname[n:]
+		return first, &arg
+	}
+
+	return optname, nil
+}
+
+func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
+	if argument == nil {
+		optname, argument = p.splitShortConcatArg(s, optname)
+	}
+
+	for i, c := range optname {
+		shortname := string(c)
+
+		if option := s.lookup.shortNames[shortname]; option != nil {
+			// Only the last short option can consume an argument from
+			// the arguments list, and only if its argument is not optional
+			canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument
+
+			if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
+				return err
+			}
+		} else {
+			return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname)
+		}
+
+		// Only the first option can have a concatenated argument, so just
+		// clear the argument here
+		argument = nil
+	}
+
+	return nil
+}
+
+func (p *parseState) addArgs(args ...string) error {
+	for len(p.positional) > 0 && len(args) > 0 {
+		arg := p.positional[0]
+
+		if err := convert(args[0], arg.value, arg.tag); err != nil {
+			p.err = err
+			return err
+		}
+
+		if !arg.isRemaining() {
+			p.positional = p.positional[1:]
+		}
+
+		args = args[1:]
+	}
+
+	p.retargs = append(p.retargs, args...)
+	return nil
+}
+
+func (p *Parser) parseNonOption(s *parseState) error {
+	if len(s.positional) > 0 {
+		return s.addArgs(s.arg)
+	}
+
+	if len(s.command.commands) > 0 && len(s.retargs) == 0 {
+		if cmd := s.lookup.commands[s.arg]; cmd != nil {
+			s.command.Active = cmd
+			cmd.fillParseState(s)
+
+			return nil
+		} else if !s.command.SubcommandsOptional {
+			s.addArgs(s.arg)
+			return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg)
+		}
+	}
+
+	if (p.Options & PassAfterNonOption) != None {
+		// If PassAfterNonOption is set then all remaining arguments
+		// are considered positional
+		if err := s.addArgs(s.arg); err != nil {
+			return err
+		}
+
+		if err := s.addArgs(s.args...); err != nil {
+			return err
+		}
+
+		s.args = []string{}
+	} else {
+		return s.addArgs(s.arg)
+	}
+
+	return nil
+}
+
+func (p *Parser) showBuiltinHelp() error {
+	var b bytes.Buffer
+
+	p.WriteHelp(&b)
+	return newError(ErrHelp, b.String())
+}
+
+func (p *Parser) printError(err error) error {
+	if err != nil && (p.Options&PrintErrors) != None {
+		flagsErr, ok := err.(*Error)
+
+		if ok && flagsErr.Type == ErrHelp {
+			fmt.Fprintln(os.Stdout, err)
+		} else {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+
+	return err
+}
+
+func (p *Parser) clearIsSet() {
+	p.eachCommand(func(c *Command) {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				option.isSet = false
+			}
+		})
+	}, true)
+}
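For orientation, here is a minimal, illustrative sketch (not part of the vendored file) of how the parser above is typically consumed. The `opts` struct, its tags, and the greeting program are hypothetical; only `flags.Parse` and the `Default` options come from the package itself.

```go
package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

// opts is a hypothetical option group; the struct tags are what the parser
// reads when the default "Application Options" group is added.
type opts struct {
	Verbose []bool `short:"v" long:"verbose" description:"verbose output"`
	Name    string `short:"n" long:"name" description:"name to greet" default:"world"`
}

func main() {
	var o opts

	// Equivalent to NewParser(&o, Default).Parse(); with the Default options
	// (HelpFlag | PrintErrors | PassDoubleDash), help output and parse errors
	// have already been printed by the time an error is returned.
	rest, err := flags.Parse(&o)
	if err != nil {
		return
	}

	fmt.Printf("hello %s (verbosity %d), remaining args: %v\n", o.Name, len(o.Verbose), rest)
}
```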
diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go
new file mode 100644
index 0000000..1ca6a85
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize.go
@@ -0,0 +1,28 @@
+// +build !windows,!plan9,!solaris,!appengine
+
+package flags
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type winsize struct {
+	row, col       uint16
+	xpixel, ypixel uint16
+}
+
+func getTerminalColumns() int {
+	ws := winsize{}
+
+	if tIOCGWINSZ != 0 {
+		syscall.Syscall(syscall.SYS_IOCTL,
+			uintptr(0),
+			uintptr(tIOCGWINSZ),
+			uintptr(unsafe.Pointer(&ws)))
+
+		return int(ws.col)
+	}
+
+	return 80
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
new file mode 100644
index 0000000..3d5385b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
@@ -0,0 +1,7 @@
+// +build windows plan9 solaris appengine
+
+package flags
+
+func getTerminalColumns() int {
+	return 80
+}
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
new file mode 100644
index 0000000..fcc1186
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
@@ -0,0 +1,7 @@
+// +build darwin freebsd netbsd openbsd
+
+package flags
+
+const (
+	tIOCGWINSZ = 0x40087468
+)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
new file mode 100644
index 0000000..e3975e2
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
@@ -0,0 +1,7 @@
+// +build linux
+
+package flags
+
+const (
+	tIOCGWINSZ = 0x5413
+)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
new file mode 100644
index 0000000..3082151
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
@@ -0,0 +1,7 @@
+// +build !darwin,!freebsd,!netbsd,!openbsd,!linux
+
+package flags
+
+const (
+	tIOCGWINSZ = 0
+)
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/buffer.go b/vendor/github.com/jhump/protoreflect/codec/buffer.go
new file mode 100644
index 0000000..b9de99c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/buffer.go
@@ -0,0 +1,112 @@
+package codec
+
+import (
+	"fmt"
+	"io"
+)
+
+// Buffer is a reader and a writer that wraps a slice of bytes and also
+// provides API for decoding and encoding the protobuf binary format.
+//
+// Its operation is similar to that of a bytes.Buffer: writing pushes
+// data to the end of the buffer while reading pops data from the head
+// of the buffer. So the same buffer can be used to both read and write.
+type Buffer struct {
+	buf   []byte
+	index int
+
+	// tmp is used when another byte slice is needed, such as when
+	// serializing messages, since we need to know the length before
+	// we can write the length prefix; by caching this, including
+	// after it is grown by serialization operations, we reduce the
+	// number of allocations needed
+	tmp []byte
+
+	deterministic bool
+}
+
+// NewBuffer creates a new buffer with the given slice of bytes as the
+// buffer's initial contents.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{buf: buf}
+}
+
+// SetDeterministic sets this buffer to encode messages deterministically. This
+// is useful for tests, but the overhead is non-zero, so it is generally not
+// worth enabling outside of tests. When true, map fields in a message are
+// serialized with their keys in sorted order. Otherwise, values in a map field
+// will be serialized in map iteration order.
+func (cb *Buffer) SetDeterministic(deterministic bool) {
+	cb.deterministic = deterministic
+}
+
+// Reset resets this buffer back to empty. Any subsequent writes/encodes
+// to the buffer will allocate a new backing slice of bytes.
+func (cb *Buffer) Reset() {
+	cb.buf = []byte(nil)
+	cb.index = 0
+}
+
+// Bytes returns the slice of bytes remaining in the buffer. Note that
+// this does not perform a copy: if the contents of the returned slice
+// are modified, the modifications will be visible to subsequent reads
+// via the buffer.
+func (cb *Buffer) Bytes() []byte {
+	return cb.buf[cb.index:]
+}
+
+// String returns the remaining bytes in the buffer as a string.
+func (cb *Buffer) String() string {
+	return string(cb.Bytes())
+}
+
+// EOF returns true if there are no more bytes remaining to read.
+func (cb *Buffer) EOF() bool {
+	return cb.index >= len(cb.buf)
+}
+
+// Skip attempts to skip the given number of bytes in the input. If
+// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
+// is returned and the buffer is unchanged. Otherwise, the given number
+// of bytes are skipped and a nil error is returned.
+func (cb *Buffer) Skip(count int) error {
+	if count < 0 {
+		return fmt.Errorf("proto: bad byte length %d", count)
+	}
+	newIndex := cb.index + count
+	if newIndex < cb.index || newIndex > len(cb.buf) {
+		return io.ErrUnexpectedEOF
+	}
+	cb.index = newIndex
+	return nil
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+	return len(cb.buf) - cb.index
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads min(len(dest), cb.Len()) bytes from the input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+	if cb.index == len(cb.buf) {
+		return 0, io.EOF
+	}
+	copied := copy(dest, cb.buf[cb.index:])
+	cb.index += copied
+	return copied, nil
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+	cb.buf = append(cb.buf, data...)
+	return len(data), nil
+}
+
+var _ io.Writer = (*Buffer)(nil)
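As a quick orientation (illustrative only, not part of the vendored package), the read/write semantics described above can be exercised as follows; the byte values are assumptions picked for the example.

```go
package main

import (
	"fmt"

	"github.com/jhump/protoreflect/codec"
)

func main() {
	// An empty buffer; Write appends to the tail.
	b := codec.NewBuffer(nil)
	_, _ = b.Write([]byte{0x08, 0x96, 0x01}) // hand-written bytes: field 1, varint 150

	fmt.Println(b.Len(), b.EOF()) // 3 false

	// Read pops from the head of the same buffer.
	dest := make([]byte, 2)
	n, _ := b.Read(dest)
	fmt.Println(n, b.Len()) // 2 1

	// Skip consumes the remaining byte; the buffer then reports EOF.
	_ = b.Skip(1)
	fmt.Println(b.EOF()) // true
}
```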
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode.go b/vendor/github.com/jhump/protoreflect/codec/decode.go
new file mode 100644
index 0000000..2a7e59f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode.go
@@ -0,0 +1,372 @@
+package codec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = errors.New("proto: integer overflow")
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = errors.New("proto: bad wiretype")
+
+var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+
+func init() {
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
+func (cb *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := cb.index
+	l := len(cb.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := cb.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			cb.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = ErrOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) DecodeVarint() (uint64, error) {
+	i := cb.index
+	buf := cb.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		cb.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return cb.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x := uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, ErrOverflow
+
+done:
+	cb.index = i
+	return x, nil
+}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+	var v uint64
+	v, err = cb.DecodeVarint()
+	if err != nil {
+		return
+	}
+	// low 3 bits is the wire type
+	wireType = int8(v & 7)
+	// rest is int32 tag number
+	v = v >> 3
+	if v > math.MaxInt32 {
+		err = fmt.Errorf("tag number out of range: %d", v)
+		return
+	}
+	tag = int32(v)
+	return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 8
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-8])
+	x |= uint64(cb.buf[i-7]) << 8
+	x |= uint64(cb.buf[i-6]) << 16
+	x |= uint64(cb.buf[i-5]) << 24
+	x |= uint64(cb.buf[i-4]) << 32
+	x |= uint64(cb.buf[i-3]) << 40
+	x |= uint64(cb.buf[i-2]) << 48
+	x |= uint64(cb.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 4
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-4])
+	x |= uint64(cb.buf[i-3]) << 8
+	x |= uint64(cb.buf[i-2]) << 16
+	x |= uint64(cb.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigZag32 decodes a signed 32-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag32(v uint64) int32 {
+	return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31))
+}
+
+// DecodeZigZag64 decodes a signed 64-bit integer from the given
+// zig-zag encoded value.
+func DecodeZigZag64(v uint64) int64 {
+	return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63))
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := cb.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := cb.index + nb
+	if end < cb.index || end > len(cb.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		buf = cb.buf[cb.index:end]
+		cb.index = end
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, cb.buf[cb.index:])
+	cb.index = end
+	return
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+	var groupEnd, dataEnd int
+	groupEnd, dataEnd, err := cb.findGroupEnd()
+	if err != nil {
+		return nil, err
+	}
+	var results []byte
+	if !alloc {
+		results = cb.buf[cb.index:dataEnd]
+	} else {
+		results = make([]byte, dataEnd-cb.index)
+		copy(results, cb.buf[cb.index:])
+	}
+	cb.index = groupEnd
+	return results, nil
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+	groupEnd, _, err := cb.findGroupEnd()
+	if err != nil {
+		return err
+	}
+	cb.index = groupEnd
+	return nil
+}
+
+func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) {
+	bs := cb.buf
+	start := cb.index
+	defer func() {
+		cb.index = start
+	}()
+	for {
+		fieldStart := cb.index
+		// read a field tag
+		_, wireType, err := cb.DecodeTagAndWireType()
+		if err != nil {
+			return 0, 0, err
+		}
+		// skip past the field's data
+		switch wireType {
+		case proto.WireFixed32:
+			if err := cb.Skip(4); err != nil {
+				return 0, 0, err
+			}
+		case proto.WireFixed64:
+			if err := cb.Skip(8); err != nil {
+				return 0, 0, err
+			}
+		case proto.WireVarint:
+			// skip varint by finding last byte (has high bit unset)
+			i := cb.index
+			limit := i + 10 // varint cannot be >10 bytes
+			for {
+				if i >= limit {
+					return 0, 0, ErrOverflow
+				}
+				if i >= len(bs) {
+					return 0, 0, io.ErrUnexpectedEOF
+				}
+				if bs[i]&0x80 == 0 {
+					break
+				}
+				i++
+			}
+			// TODO: This would only overflow if buffer length was MaxInt and we
+			// read the last byte. This is not a real/feasible concern on 64-bit
+			// systems. Something to worry about for 32-bit systems? Do we care?
+			cb.index = i + 1
+		case proto.WireBytes:
+			l, err := cb.DecodeVarint()
+			if err != nil {
+				return 0, 0, err
+			}
+			if err := cb.Skip(int(l)); err != nil {
+				return 0, 0, err
+			}
+		case proto.WireStartGroup:
+			if err := cb.SkipGroup(); err != nil {
+				return 0, 0, err
+			}
+		case proto.WireEndGroup:
+			return cb.index, fieldStart, nil
+		default:
+			return 0, 0, ErrBadWireType
+		}
+	}
+}
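To make the decoding entry points above concrete, here is a small illustrative sketch (not part of the vendored file) that walks a hand-crafted length-delimited field; the field number and payload bytes are assumptions chosen for the example.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/codec"
)

func main() {
	// 0x0a = (field 1 << 3) | WireBytes, followed by a 2-byte payload "hi".
	b := codec.NewBuffer([]byte{0x0a, 0x02, 'h', 'i'})

	tag, wireType, err := b.DecodeTagAndWireType()
	if err != nil {
		panic(err)
	}
	fmt.Println(tag, wireType == int8(proto.WireBytes)) // 1 true

	payload, err := b.DecodeRawBytes(true) // copy the bytes out of the buffer
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload), b.EOF()) // hi true
}
```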
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
new file mode 100644
index 0000000..938b4d9
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -0,0 +1,283 @@
+package codec
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
+// it reads indicates an end-group marker.
+var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group")
+
+// MessageFactory is used to instantiate messages when DecodeFieldValue needs to
+// decode a message value.
+//
+// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which
+// implements this interface.
+type MessageFactory interface {
+	NewMessage(md *desc.MessageDescriptor) proto.Message
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// The tag number for the unrecognized field.
+	Tag int32
+
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// DecodeFieldValue will read a field value from the buffer and return its
+// value and the corresponding field descriptor. The given function is used
+// to lookup a field descriptor by tag number. The given factory is used to
+// instantiate a message if the field value is (or contains) a message value.
+//
+// On error, the field descriptor and value are typically nil. However, if the
+// error returned is ErrWireTypeEndGroup, the returned value will indicate any
+// tag number encoded in the end-group marker.
+//
+// If the field descriptor returned is nil, that means that the given function
+// returned nil. This is expected to happen for unrecognized tag numbers. In
+// that case, no error is returned, and the value will be an UnknownField.
+func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) {
+	if cb.EOF() {
+		return nil, nil, io.EOF
+	}
+	tagNumber, wireType, err := cb.DecodeTagAndWireType()
+	if err != nil {
+		return nil, nil, err
+	}
+	if wireType == proto.WireEndGroup {
+		return nil, tagNumber, ErrWireTypeEndGroup
+	}
+	fd := fieldFinder(tagNumber)
+	if fd == nil {
+		val, err := cb.decodeUnknownField(tagNumber, wireType)
+		return nil, val, err
+	}
+	val, err := cb.decodeKnownField(fd, wireType, fact)
+	return fd, val, err
+}
+
+// DecodeScalarField extracts a properly-typed value from v. The returned value's
+// type depends on the given field descriptor type. It will be the same type as
+// generated structs use for the field descriptor's type. Enum types will return
+// an int32. If the given field type uses length-delimited encoding (nested
+// messages, bytes, and strings), an error is returned.
+func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return v != 0, nil
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return uint32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		s := int64(v)
+		if s > math.MaxInt32 || s < math.MinInt32 {
+			return nil, ErrOverflow
+		}
+		return int32(s), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return int32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return DecodeZigZag32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return v, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return int64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return DecodeZigZag64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if v > math.MaxUint32 {
+			return nil, ErrOverflow
+		}
+		return math.Float32frombits(uint32(v)), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return math.Float64frombits(v), nil
+
+	default:
+		// bytes, string, message, and group cannot be represented as a simple numeric value
+		return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
+	}
+}
+
+// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The
+// returned value's type will usually be []byte, string, or, for nested messages,
+// the type returned from the given message factory. However, since repeated
+// scalar fields can be length-delimited when they use packed encoding, it can
+// also return an []interface{}, where each element is a scalar value. Furthermore,
+// it could return a scalar type, not in a slice, if the given field descriptor is
+// not repeated. This is to support cases where a field is changed from optional
+// to repeated. New code may emit a packed repeated representation, but old code
+// still expects a single scalar value. In this case, if the actual data in bytes
+// contains multiple values, only the last value is returned.
+func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
+	switch {
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return bytes, nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+		return string(bytes), nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+		msg := mf.NewMessage(fd.GetMessageType())
+		err := proto.Unmarshal(bytes, msg)
+		if err != nil {
+			return nil, err
+		} else {
+			return msg, nil
+		}
+
+	default:
+		// even if the field is not repeated or not packed, we still parse it as such for
+		// backwards compatibility (e.g. message we are de-serializing could have been both
+		// repeated and packed at the time of serialization)
+		packedBuf := NewBuffer(bytes)
+		var slice []interface{}
+		var val interface{}
+		for !packedBuf.EOF() {
+			var v uint64
+			var err error
+			if varintTypes[fd.GetType()] {
+				v, err = packedBuf.DecodeVarint()
+			} else if fixed32Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed32()
+			} else if fixed64Types[fd.GetType()] {
+				v, err = packedBuf.DecodeFixed64()
+			} else {
+				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
+			}
+			if err != nil {
+				return nil, err
+			}
+			val, err = DecodeScalarField(fd, v)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() {
+				slice = append(slice, val)
+			}
+		}
+		if fd.IsRepeated() {
+			return slice, nil
+		} else {
+			// if not a repeated field, last value wins
+			return val, nil
+		}
+	}
+}
+
+func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) {
+	var val interface{}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		var num uint64
+		num, err = b.DecodeFixed32()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireFixed64:
+		var num uint64
+		num, err = b.DecodeFixed64()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+	case proto.WireVarint:
+		var num uint64
+		num, err = b.DecodeVarint()
+		if err == nil {
+			val, err = DecodeScalarField(fd, num)
+		}
+
+	case proto.WireBytes:
+		alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+		var raw []byte
+		raw, err = b.DecodeRawBytes(alloc)
+		if err == nil {
+			val, err = DecodeLengthDelimitedField(fd, raw, fact)
+		}
+
+	case proto.WireStartGroup:
+		if fd.GetMessageType() == nil {
+			return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
+		}
+		msg := fact.NewMessage(fd.GetMessageType())
+		var data []byte
+		data, err = b.ReadGroup(false)
+		if err == nil {
+			err = proto.Unmarshal(data, msg)
+			if err == nil {
+				val = msg
+			}
+		}
+
+	default:
+		return nil, ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return val, nil
+}
+
+func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) {
+	u := UnknownField{Tag: tagNumber, Encoding: encoding}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		u.Value, err = b.DecodeFixed32()
+	case proto.WireFixed64:
+		u.Value, err = b.DecodeFixed64()
+	case proto.WireVarint:
+		u.Value, err = b.DecodeVarint()
+	case proto.WireBytes:
+		u.Contents, err = b.DecodeRawBytes(true)
+	case proto.WireStartGroup:
+		u.Contents, err = b.ReadGroup(true)
+	default:
+		err = ErrBadWireType
+	}
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go
new file mode 100644
index 0000000..f76499f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/doc.go
@@ -0,0 +1,7 @@
+// Package codec contains a reader/writer type that assists with encoding
+// and decoding protobuf's binary representation.
+//
+// The code in this package began as a fork of proto.Buffer but provides
+// additional API to make it more useful to code that needs to dynamically
+// process or produce the protobuf binary format.
+package codec
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode.go b/vendor/github.com/jhump/protoreflect/codec/encode.go
new file mode 100644
index 0000000..c84523f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode.go
@@ -0,0 +1,163 @@
+package codec
+
+import "github.com/golang/protobuf/proto"
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	cb.buf = append(cb.buf, uint8(x))
+	return nil
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+	v := uint64((int64(tag) << 3) | int64(wireType))
+	return cb.EncodeVarint(v)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigZag64 does zig-zag encoding to convert the given
+// signed 64-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag64(v int64) uint64 {
+	return (uint64(v) << 1) ^ uint64(v>>63)
+}
+
+// EncodeZigZag32 does zig-zag encoding to convert the given
+// signed 32-bit integer into a form that can be expressed
+// efficiently as a varint, even for negative values.
+func EncodeZigZag32(v int32) uint64 {
+	return uint64((uint32(v) << 1) ^ uint32((v >> 31)))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+	if err := cb.EncodeVarint(uint64(len(b))); err != nil {
+		return err
+	}
+	cb.buf = append(cb.buf, b...)
+	return nil
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+	bytes, err := marshalMessage(cb.buf, pm, cb.deterministic)
+	if err != nil {
+		return err
+	}
+	cb.buf = bytes
+	return nil
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+	bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic)
+	if err != nil {
+		return err
+	}
+	// save truncated buffer if it was grown (so we can re-use it and
+	// curtail future allocations)
+	if cap(bytes) > cap(cb.tmp) {
+		cb.tmp = bytes[:0]
+	}
+	return cb.EncodeRawBytes(bytes)
+}
+
+func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) {
+	// we try to use the most efficient way to marshal to existing slice
+	nm, ok := pm.(interface {
+		// this interface is implemented by generated messages
+		XXX_Size() int
+		XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+	})
+	if ok {
+		sz := nm.XXX_Size()
+		if cap(b) < len(b)+sz {
+			// re-allocate to fit
+			bytes := make([]byte, len(b), len(b)+sz)
+			copy(bytes, b)
+			b = bytes
+		}
+		return nm.XXX_Marshal(b, deterministic)
+	}
+
+	if deterministic {
+		// see if the message has custom deterministic methods, preferring an
+		// "append" method over one that must always re-allocate
+		madm, ok := pm.(interface {
+			MarshalAppendDeterministic(b []byte) ([]byte, error)
+		})
+		if ok {
+			return madm.MarshalAppendDeterministic(b)
+		}
+
+		mdm, ok := pm.(interface {
+			MarshalDeterministic() ([]byte, error)
+		})
+		if ok {
+			bytes, err := mdm.MarshalDeterministic()
+			if err != nil {
+				return nil, err
+			}
+			if len(b) == 0 {
+				return bytes, nil
+			}
+			return append(b, bytes...), nil
+		}
+	}
+
+	mam, ok := pm.(interface {
+		// see if we can append the message, vs. having to re-allocate
+		MarshalAppend(b []byte) ([]byte, error)
+	})
+	if ok {
+		return mam.MarshalAppend(b)
+	}
+
+	// lowest common denominator
+	bytes, err := proto.Marshal(pm)
+	if err != nil {
+		return nil, err
+	}
+	if len(b) == 0 {
+		return bytes, nil
+	}
+	return append(b, bytes...), nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
new file mode 100644
index 0000000..cda7299
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -0,0 +1,267 @@
+package codec
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+	if fd.IsMap() {
+		mp := val.(map[interface{}]interface{})
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valType := entryType.FindFieldByNumber(2)
+		var entryBuffer Buffer
+		if cb.deterministic {
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, k := range keys {
+				v := mp[k]
+				entryBuffer.Reset()
+				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+					return err
+				}
+				if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+					return err
+				}
+				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+					return err
+				}
+				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+					return err
+				}
+			}
+		} else {
+			for k, v := range mp {
+				entryBuffer.Reset()
+				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
+					return err
+				}
+				if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
+					return err
+				}
+				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+					return err
+				}
+				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	} else if fd.IsRepeated() {
+		sl := val.([]interface{})
+		wt, err := getWireType(fd.GetType())
+		if err != nil {
+			return err
+		}
+		if isPacked(fd) && len(sl) > 1 &&
+			(wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
+			// packed repeated field
+			var packedBuffer Buffer
+			for _, v := range sl {
+				if err := packedBuffer.encodeFieldValue(fd, v); err != nil {
+					return err
+				}
+			}
+			if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
+				return err
+			}
+			return cb.EncodeRawBytes(packedBuffer.Bytes())
+		} else {
+			// non-packed repeated field
+			for _, v := range sl {
+				if err := cb.encodeFieldElement(fd, v); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	} else {
+		return cb.encodeFieldElement(fd, val)
+	}
+}
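+
+// A minimal usage sketch, assuming cb is an empty Buffer and fd describes a
+// singular int32 field with tag number 1 (hypothetical descriptor): the call
+// writes the tag/wire-type byte followed by the varint-encoded value:
+//
+//	_ = cb.EncodeFieldValue(fd, int32(150))
+//	// cb.Bytes() == []byte{0x08, 0x96, 0x01}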
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+	opts := fd.AsFieldDescriptorProto().GetOptions()
+	// if set, use that value
+	if opts != nil && opts.Packed != nil {
+		return opts.GetPacked()
+	}
+	// if unset: proto2 defaults to false, proto3 to true
+	return fd.GetFile().IsProto3()
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+	return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+	vi := s[i]
+	vj := s[j]
+	switch reflect.TypeOf(vi).Kind() {
+	case reflect.Int32:
+		return vi.(int32) < vj.(int32)
+	case reflect.Int64:
+		return vi.(int64) < vj.(int64)
+	case reflect.Uint32:
+		return vi.(uint32) < vj.(uint32)
+	case reflect.Uint64:
+		return vi.(uint64) < vj.(uint64)
+	case reflect.String:
+		return vi.(string) < vj.(string)
+	case reflect.Bool:
+		return !vi.(bool) && vj.(bool)
+	default:
+		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+	}
+}
+
+func (s sortable) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error {
+	wt, err := getWireType(fd.GetType())
+	if err != nil {
+		return err
+	}
+	if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil {
+		return err
+	}
+	if err := b.encodeFieldValue(fd, val); err != nil {
+		return err
+	}
+	if wt == proto.WireStartGroup {
+		return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup)
+	}
+	return nil
+}
+
+func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		v := val.(bool)
+		if v {
+			return b.EncodeVarint(1)
+		}
+		return b.EncodeVarint(0)
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_INT32:
+		v := val.(int32)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		v := val.(int32)
+		return b.EncodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		v := val.(int32)
+		return b.EncodeVarint(EncodeZigZag32(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		v := val.(uint32)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		v := val.(uint32)
+		return b.EncodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		v := val.(int64)
+		return b.EncodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		v := val.(int64)
+		return b.EncodeFixed64(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		v := val.(int64)
+		return b.EncodeVarint(EncodeZigZag64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		v := val.(uint64)
+		return b.EncodeVarint(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		v := val.(uint64)
+		return b.EncodeFixed64(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		v := val.(float64)
+		return b.EncodeFixed64(math.Float64bits(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		v := val.(float32)
+		return b.EncodeFixed32(uint64(math.Float32bits(v)))
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		v := val.([]byte)
+		return b.EncodeRawBytes(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		v := val.(string)
+		return b.EncodeRawBytes(([]byte)(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return b.EncodeDelimitedMessage(val.(proto.Message))
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		// just append the nested message to this buffer; whoever writes the
+		// start-group tag (i.e. the caller) is responsible for writing the
+		// matching end-group tag
+		return b.EncodeMessage(val.(proto.Message))
+
+	default:
+		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return proto.WireVarint, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return proto.WireFixed32, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return proto.WireFixed64, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES,
+		descriptor.FieldDescriptorProto_TYPE_STRING,
+		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return proto.WireBytes, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return proto.WireStartGroup, nil
+
+	default:
+		return 0, ErrBadWireType
+	}
+}
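+
+// A minimal sketch of the mapping above (constants are from the
+// github.com/golang/protobuf/proto package):
+//
+//	wt, _ := getWireType(descriptor.FieldDescriptorProto_TYPE_SINT64) // proto.WireVarint
+//	wt, _ = getWireType(descriptor.FieldDescriptorProto_TYPE_DOUBLE)  // proto.WireFixed64
+//	wt, _ = getWireType(descriptor.FieldDescriptorProto_TYPE_STRING)  // proto.WireBytes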
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..538820c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,231 @@
+package desc
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+	intn "github.com/jhump/protoreflect/internal"
+)
+
+// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
+// The file's direct dependencies must be provided. If the given dependencies do not include
+// all of the file's dependencies or if the contents of the descriptors are internally
+// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
+func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fd, deps, nil)
+}
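+
+// A minimal usage sketch, assuming fdProto is a *dpb.FileDescriptorProto whose
+// only import is "google/protobuf/empty.proto" and emptyFd is the already-built
+// descriptor for that import (both names are hypothetical):
+//
+//	fd, err := desc.CreateFileDescriptor(fdProto, emptyFd)
+//	if err != nil {
+//		// missing dependencies or unresolvable symbols
+//	}
+//	_ = fd.GetMessageTypes()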
+
+func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+	ret := &FileDescriptor{
+		proto:      fd,
+		symbols:    map[string]Descriptor{},
+		fieldIndex: map[string]map[int32]*FieldDescriptor{},
+	}
+	pkg := fd.GetPackage()
+
+	// populate references to file descriptor dependencies
+	files := map[string]*FileDescriptor{}
+	for _, f := range deps {
+		files[f.proto.GetName()] = f
+	}
+	ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, d := range fd.GetDependency() {
+		resolved := r.ResolveImport(fd.GetName(), d)
+		ret.deps[i] = files[resolved]
+		if ret.deps[i] == nil {
+			if resolved != d {
+				ret.deps[i] = files[d]
+			}
+			if ret.deps[i] == nil {
+				return nil, intn.ErrNoSuchFile(d)
+			}
+		}
+	}
+	ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+	for i, pd := range fd.GetPublicDependency() {
+		ret.publicDeps[i] = ret.deps[pd]
+	}
+	ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+	for i, wd := range fd.GetWeakDependency() {
+		ret.weakDeps[i] = ret.deps[wd]
+	}
+	ret.isProto3 = fd.GetSyntax() == "proto3"
+
+	// populate all tables of child descriptors
+	for _, m := range fd.GetMessageType() {
+		md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
+		ret.symbols[n] = md
+		ret.messages = append(ret.messages, md)
+	}
+	for _, e := range fd.GetEnumType() {
+		ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
+		ret.symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range fd.GetExtension() {
+		exd, n := createFieldDescriptor(ret, ret, pkg, ex)
+		ret.symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for _, s := range fd.GetService() {
+		sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
+		ret.symbols[n] = sd
+		ret.services = append(ret.services, sd)
+	}
+
+	ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+	ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+	// now we can resolve all type references and source code info
+	scopes := []scope{fileScope(ret)}
+	path := make([]int32, 1, 8)
+	path[0] = internal.File_messagesTag
+	for i, md := range ret.messages {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_enumsTag
+	for i, ed := range ret.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[0] = internal.File_extensionsTag
+	for i, exd := range ret.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_servicesTag
+	for i, sd := range ret.services {
+		if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+
+	return ret, nil
+}
+
+// CreateFileDescriptors constructs a set of descriptors, one for each of the
+// given descriptor protos. The given set of descriptor protos must include all
+// transitive dependencies for every file.
+func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	if len(fds) == 0 {
+		return nil, nil
+	}
+	files := map[string]*dpb.FileDescriptorProto{}
+	resolved := map[string]*FileDescriptor{}
+	var name string
+	for _, fd := range fds {
+		name = fd.GetName()
+		files[name] = fd
+	}
+	for _, fd := range fds {
+		_, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return resolved, nil
+}
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
+	var fdps []*dpb.FileDescriptorProto
+	addAllFiles(fds, &fdps, map[string]struct{}{})
+	return &dpb.FileDescriptorSet{File: fdps}
+}
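+
+// A minimal sketch, assuming fd is a *FileDescriptor whose file imports
+// "google/protobuf/timestamp.proto": dependencies precede dependents, so the
+// requested file comes last in the resulting set:
+//
+//	fdset := desc.ToFileDescriptorSet(fd)
+//	// fdset.GetFile()[len(fdset.GetFile())-1] == fd.AsFileDescriptorProto()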
+
+func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+	for _, fd := range src {
+		if _, ok := seen[fd.GetName()]; ok {
+			continue
+		}
+		seen[fd.GetName()] = struct{}{}
+		addAllFiles(fd.GetDependencies(), results, seen)
+		*results = append(*results, fd.AsFileDescriptorProto())
+	}
+}
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, nil)
+}
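+
+// A minimal usage sketch for loading such a protoset file, assuming
+// "test.protoset" was produced by the protoc invocation shown above (the file
+// name and the elided error handling are illustrative only):
+//
+//	data, _ := ioutil.ReadFile("test.protoset")
+//	var fdset dpb.FileDescriptorSet
+//	_ = proto.Unmarshal(data, &fdset)
+//	fd, err := desc.CreateFileDescriptorFromSet(&fdset)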
+
+func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+	result, err := createFileDescriptorsFromSet(fds, r)
+	if err != nil {
+		return nil, err
+	}
+	files := fds.GetFile()
+	lastFilename := files[len(files)-1].GetName()
+	return result[lastFilename], nil
+}
+
+// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set.
+// The returned map includes all files in the set, keyed by name. The set must include the
+// full set of transitive dependencies for all files therein or else a link error will occur
+// and be returned instead of the map of descriptors. This is the same format used by
+// protoc when emitting a FileDescriptorSet file with an invocation like so:
+//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+	return createFileDescriptorsFromSet(fds, nil)
+}
+
+func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	files := fds.GetFile()
+	if len(files) == 0 {
+		return nil, errors.New("file descriptor set is empty")
+	}
+	return createFileDescriptors(files, r)
+}
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+	for _, s := range seen {
+		if filename == s {
+			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
+		}
+	}
+	seen = append(seen, filename)
+
+	if d, ok := resolved[filename]; ok {
+		return d, nil
+	}
+	fdp := files[filename]
+	if fdp == nil {
+		return nil, intn.ErrNoSuchFile(filename)
+	}
+	deps := make([]*FileDescriptor, len(fdp.GetDependency()))
+	for i, depName := range fdp.GetDependency() {
+		resolvedDep := r.ResolveImport(filename, depName)
+		dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
+		if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
+			dep, err = createFromSet(depName, r, seen, files, resolved)
+		}
+		if err != nil {
+			return nil, err
+		}
+		deps[i] = dep
+	}
+	d, err := createFileDescriptor(fdp, deps, r)
+	if err != nil {
+		return nil, err
+	}
+	resolved[filename] = d
+	return d, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..ab235a3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1666 @@
+package desc
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+	// GetName returns the name of the object described by the descriptor. This will
+	// be a base name that does not include enclosing message names or the package name.
+	// For file descriptors, this indicates the path and name to the described file.
+	GetName() string
+	// GetFullyQualifiedName returns the fully-qualified name of the object described by
+	// the descriptor. This will include the package name and any enclosing message names.
+	// For file descriptors, this returns the path and name to the described file (same as
+	// GetName).
+	GetFullyQualifiedName() string
+	// GetParent returns the enclosing element in a proto source file. If the described
+	// object is a top-level object, this returns the file descriptor. Otherwise, it returns
+	// the element in which the described object was declared. File descriptors have no
+	// parent and return nil.
+	GetParent() Descriptor
+	// GetFile returns the file descriptor in which this element was declared. File
+	// descriptors return themselves.
+	GetFile() *FileDescriptor
+	// GetOptions returns the options proto containing options for the described element.
+	GetOptions() proto.Message
+	// GetSourceInfo returns any source code information that was present in the file
+	// descriptor. Source code info is optional. If no source code info is available for
+	// the element (including if there is none at all in the file descriptor) then this
+	// returns nil
+	GetSourceInfo() *dpb.SourceCodeInfo_Location
+	// AsProto returns the underlying descriptor proto for this descriptor.
+	AsProto() proto.Message
+}
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+	proto      *dpb.FileDescriptorProto
+	symbols    map[string]Descriptor
+	deps       []*FileDescriptor
+	publicDeps []*FileDescriptor
+	weakDeps   []*FileDescriptor
+	messages   []*MessageDescriptor
+	enums      []*EnumDescriptor
+	extensions []*FieldDescriptor
+	services   []*ServiceDescriptor
+	fieldIndex map[string]map[int32]*FieldDescriptor
+	isProto3   bool
+	sourceInfo internal.SourceInfoMap
+	sourceInfoRecomputeFunc
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+	fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+	if fields == nil {
+		fields = map[int32]*FieldDescriptor{}
+		fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+	}
+	fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+	return fd.proto.GetName()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+	return fd.proto.GetPackage()
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// It is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+	return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+	return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+func (fd *FileDescriptor) IsProto3() bool {
+	return fd.isProto3
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+	return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+	return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+	return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+	return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+	return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+	return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+	return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if symbol[0] == '.' {
+		symbol = symbol[1:]
+	}
+	return fd.symbols[symbol]
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+	if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+	if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+		return ed
+	} else {
+		return nil
+	}
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+	if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+		return sd
+	} else {
+		return nil
+	}
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+	if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+	if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
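+
+// A minimal lookup sketch, assuming fd describes a file with package "foo"
+// that declares `message Bar { string name = 1; }` (hypothetical contents):
+//
+//	md := fd.FindMessage("foo.Bar") // *MessageDescriptor for Bar
+//	_ = fd.FindSymbol(".foo.Bar")   // same descriptor; a leading dot is tolerated
+//	_ = fd.FindEnum("foo.Bar")      // nil: the symbol exists but is not an enum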
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+	proto          *dpb.DescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	fields         []*FieldDescriptor
+	nested         []*MessageDescriptor
+	enums          []*EnumDescriptor
+	extensions     []*FieldDescriptor
+	oneOfs         []*OneOfDescriptor
+	extRanges      extRanges
+	fqn            string
+	sourceInfoPath []int32
+	jsonNames      jsonNameMap
+	isProto3       bool
+	isMapEntry     bool
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
+	msgName := merge(enclosing, md.GetName())
+	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
+	for _, f := range md.GetField() {
+		fld, n := createFieldDescriptor(fd, ret, msgName, f)
+		symbols[n] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	for _, nm := range md.NestedType {
+		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
+		symbols[n] = nmd
+		ret.nested = append(ret.nested, nmd)
+	}
+	for _, e := range md.EnumType {
+		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
+		symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range md.GetExtension() {
+		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
+		symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for i, o := range md.GetOneofDecl() {
+		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
+		symbols[n] = od
+		ret.oneOfs = append(ret.oneOfs, od)
+	}
+	for _, r := range md.GetExtensionRange() {
+		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code),
+		// but protoc converts the range to an exclusive end in the descriptor, so we must convert back
+		end := r.GetEnd() - 1
+		ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
+			Start: r.GetStart(),
+			End:   end})
+	}
+	sort.Sort(ret.extRanges)
+	ret.isProto3 = fd.isProto3
+	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
+		len(ret.fields) == 2 &&
+		ret.fields[0].GetNumber() == 1 &&
+		ret.fields[1].GetNumber() == 2
+
+	return ret, msgName
+}
+
+func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Message_nestedMessagesTag)
+	scopes = append(scopes, messageScope(md))
+	for i, nmd := range md.nested {
+		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_enumsTag
+	for i, ed := range md.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i, fld := range md.fields {
+		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i, exd := range md.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i, od := range md.oneOfs {
+		od.resolve(append(path, int32(i)))
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the message.
+func (md *MessageDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the message. This
+// includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (md *MessageDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the message's enclosing descriptor. For top-level messages,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (md *MessageDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this message is defined.
+func (md *MessageDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the message's options. Most usages will be more interested
+// in GetMessageOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMessageOptions returns the message's options.
+func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the message, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// message was defined and also contains comments associated with the message
+// definition.
+func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsDescriptorProto returns the underlying descriptor proto.
+func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MessageDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsMapEntry returns true if this is a synthetic message type that represents an entry
+// in a map field.
+func (md *MessageDescriptor) IsMapEntry() bool {
+	return md.isMapEntry
+}
+
+// GetFields returns all of the fields for this message.
+func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
+	return md.fields
+}
+
+// GetNestedMessageTypes returns all of the message types declared inside this message.
+func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
+	return md.nested
+}
+
+// GetNestedEnumTypes returns all of the enums declared inside this message.
+func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
+	return md.enums
+}
+
+// GetNestedExtensions returns all of the extensions declared inside this message.
+func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
+	return md.extensions
+}
+
+// GetOneOfs returns all of the one-of field sets declared inside this message.
+func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
+	return md.oneOfs
+}
+
+// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
+func (md *MessageDescriptor) IsProto3() bool {
+	return md.isProto3
+}
+
+// GetExtensionRanges returns the ranges of extension field numbers for this message.
+func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
+	return md.extRanges
+}
+
+// IsExtendable returns true if this message has any extension ranges.
+func (md *MessageDescriptor) IsExtendable() bool {
+	return len(md.extRanges) > 0
+}
+
+// IsExtension returns true if the given tag number is within any of this message's
+// extension ranges.
+func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
+	return md.extRanges.IsExtension(tagNumber)
+}
+
+type extRanges []proto.ExtensionRange
+
+func (er extRanges) String() string {
+	var buf bytes.Buffer
+	first := true
+	for _, r := range er {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(",")
+		}
+		fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
+	}
+	return buf.String()
+}
+
+func (er extRanges) IsExtension(tagNumber int32) bool {
+	i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
+	return i < len(er) && tagNumber >= er[i].Start
+}
+
+func (er extRanges) Len() int {
+	return len(er)
+}
+
+func (er extRanges) Less(i, j int) bool {
+	return er[i].Start < er[j].Start
+}
+
+func (er extRanges) Swap(i, j int) {
+	er[i], er[j] = er[j], er[i]
+}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+	fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+	if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+	if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
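+
+// A minimal lookup sketch, assuming md describes a message declaring
+// `optional string name = 1;` (hypothetical contents):
+//
+//	byName := md.FindFieldByName("name")
+//	byNum := md.FindFieldByNumber(1)
+//	// byName == byNum; both methods return nil for unknown names/numbers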
+
+// FieldDescriptor describes a field of a protocol buffer message.
+type FieldDescriptor struct {
+	proto          *dpb.FieldDescriptorProto
+	parent         Descriptor
+	owner          *MessageDescriptor
+	file           *FileDescriptor
+	oneOf          *OneOfDescriptor
+	msgType        *MessageDescriptor
+	enumType       *EnumDescriptor
+	fqn            string
+	sourceInfoPath []int32
+	def            memoizedDefault
+	isMap          bool
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
+	fldName := merge(enclosing, fld.GetName())
+	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
+	if fld.GetExtendee() == "" {
+		ret.owner = parent.(*MessageDescriptor)
+	}
+	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
+	return ret, fldName
+}
+
+func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+	}
+	fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.enumType = desc.(*EnumDescriptor)
+		}
+	}
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.msgType = desc.(*MessageDescriptor)
+		}
+	}
+	if fd.proto.GetExtendee() != "" {
+		if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+			return err
+		} else {
+			fd.owner = desc.(*MessageDescriptor)
+		}
+	}
+	fd.file.registerField(fd)
+	fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
+		fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().IsMapEntry()
+	return nil
+}
+
+func (fd *FieldDescriptor) determineDefault() interface{} {
+	if fd.IsMap() {
+		return map[interface{}]interface{}(nil)
+	} else if fd.IsRepeated() {
+		return []interface{}(nil)
+	} else if fd.msgType != nil {
+		return nil
+	}
+
+	proto3 := fd.file.isProto3
+	if !proto3 {
+		def := fd.AsFieldDescriptorProto().GetDefaultValue()
+		if def != "" {
+			ret := parseDefaultValue(fd, def)
+			if ret != nil {
+				return ret
+			}
+			// if we can't parse the default value, fall through to return the normal default...
+		}
+	}
+
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_FIXED32,
+		dpb.FieldDescriptorProto_TYPE_UINT32:
+		return uint32(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
+		dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32:
+		return int32(0)
+	case dpb.FieldDescriptorProto_TYPE_FIXED64,
+		dpb.FieldDescriptorProto_TYPE_UINT64:
+		return uint64(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
+		dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64:
+		return int64(0)
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		return float32(0.0)
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return float64(0.0)
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		return false
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(nil)
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return ""
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		if proto3 {
+			return int32(0)
+		}
+		enumVals := fd.GetEnumType().GetValues()
+		if len(enumVals) > 0 {
+			return enumVals[0].GetNumber()
+		} else {
+			return int32(0) // defensive: an enum should always declare at least one value
+		}
+	default:
+		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
+	}
+}
+
+func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		vd := fd.GetEnumType().FindValueByName(val)
+		if vd != nil {
+			return vd.GetNumber()
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		if val == "true" {
+			return true
+		} else if val == "false" {
+			return false
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(unescape(val))
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return val
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := strconv.ParseFloat(val, 32); err == nil {
+			return float32(f)
+		} else {
+			return float32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		if f, err := strconv.ParseFloat(val, 64); err == nil {
+			return f
+		} else {
+			return float64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32,
+		dpb.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
+			return int32(i)
+		} else {
+			return int32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT32,
+		dpb.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
+			return uint32(i)
+		} else {
+			return uint32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64,
+		dpb.FieldDescriptorProto_TYPE_SFIXED64:
+		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
+			return i
+		} else {
+			return int64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT64,
+		dpb.FieldDescriptorProto_TYPE_FIXED64:
+		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
+			return i
+		} else {
+			return uint64(0)
+		}
+	default:
+		return nil
+	}
+}
+
+func unescape(s string) string {
+	// protoc encodes default values for 'bytes' fields using C escaping,
+	// so this function reverses that escaping
+	out := make([]byte, 0, len(s))
+	var buf [4]byte
+	for len(s) > 0 {
+		if s[0] != '\\' || len(s) < 2 {
+			// not escape sequence, or too short to be well-formed escape
+			out = append(out, s[0])
+			s = s[1:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			n := matchPrefix(s[2:], 2, isHex)
+			if n == 0 {
+				// bad escape
+				out = append(out, s[:2]...)
+				s = s[2:]
+			} else {
+				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
+				if err != nil {
+					// shouldn't really happen...
+					out = append(out, s[:2+n]...)
+				} else {
+					out = append(out, byte(c))
+				}
+				s = s[2+n:]
+			}
+		} else if s[1] >= '0' && s[1] <= '7' {
+			n := 1 + matchPrefix(s[2:], 2, isOctal)
+			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil || c > 0xff {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(c))
+			}
+			s = s[1+n:]
+		} else if s[1] == 'u' {
+			if len(s) < 6 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:6], 16, 16)
+				if err != nil {
+					// bad escape
+					out = append(out, s[:6]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[6:]
+			}
+		} else if s[1] == 'U' {
+			if len(s) < 10 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:10], 16, 32)
+				if err != nil || c > 0x10ffff {
+					// bad escape
+					out = append(out, s[:10]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[10:]
+			}
+		} else {
+			switch s[1] {
+			case 'a':
+				out = append(out, '\a')
+			case 'b':
+				out = append(out, '\b')
+			case 'f':
+				out = append(out, '\f')
+			case 'n':
+				out = append(out, '\n')
+			case 'r':
+				out = append(out, '\r')
+			case 't':
+				out = append(out, '\t')
+			case 'v':
+				out = append(out, '\v')
+			case '\\':
+				out = append(out, '\\')
+			case '\'':
+				out = append(out, '\'')
+			case '"':
+				out = append(out, '"')
+			case '?':
+				out = append(out, '?')
+			default:
+				// invalid escape, just copy it as-is
+				out = append(out, s[:2]...)
+			}
+			s = s[2:]
+		}
+	}
+	return string(out)
+}
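+
+// A minimal sketch of the unescaping above (inputs are escaped strings as they
+// would appear in a descriptor's default_value; outputs were worked out from
+// the code, not captured from a test run):
+//
+//	unescape(`\x41\101\n`) // "AA\n" (hex, octal, and single-character escapes)
+//	unescape(`\u00e9`)     // "é", encoded as UTF-8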
+
+func isOctal(b byte) bool { return b >= '0' && b <= '7' }
+func isHex(b byte) bool {
+	return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
+}
+func matchPrefix(s string, limit int, fn func(byte) bool) int {
+	l := len(s)
+	if l > limit {
+		l = limit
+	}
+	i := 0
+	for ; i < l; i++ {
+		if !fn(s[i]) {
+			return i
+		}
+	}
+	return i
+}
+
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+	return fd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+	return fd.fqn
+}
+
+// GetParent returns the field's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+	return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+	return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+	if jsonName := fd.proto.GetJsonName(); jsonName != "" {
+		return jsonName
+	}
+	return fd.proto.GetName()
+}
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+	parent := fd.GetParent()
+	switch parent := parent.(type) {
+	case *FileDescriptor:
+		pkg := parent.GetPackage()
+		if pkg == "" {
+			return fd.GetJSONName()
+		}
+		return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+	default:
+		return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+	}
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+	return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+	return fd.proto.GetExtendee() != ""
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+	return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+	return fd.proto.GetType()
+}
+
+// GetLabel returns the label for this field. The label can be required (proto2-only),
+// optional (the default for proto3), or repeated.
+func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+	return fd.proto.GetLabel()
+}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label and its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+	return fd.isMap
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(1))
+	}
+	return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(2))
+	}
+	return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+	return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+	return fd.enumType
+}
+
+// GetDefaultValue returns the default value for this field.
+//
+// If this field represents a message type, this method always returns nil (even though
+// for proto2 files, the default value should be a default instance of the message type).
+// If the field represents an enum type, this method returns an int32 corresponding to the
+// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
+// this field is repeated (and not a map), it returns a nil []interface{}.
+//
+// Otherwise, it returns the declared default value for the field or a zero value, if no
+// default is declared or if the file is proto3. The type of said return value corresponds
+// to the type of the field:
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+func (fd *FieldDescriptor) GetDefaultValue() interface{} {
+	return fd.getDefaultValue()
+}
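+
+// A minimal sketch, assuming fd describes the proto2 field
+// `optional int32 count = 1 [default = 42];` (hypothetical): the declared
+// default is returned with the Go type given in the table above:
+//
+//	fd.GetDefaultValue() // int32(42)
+//
+// For a proto3 string field with no declared default, it returns "".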
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+	proto          *dpb.EnumDescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	values         []*EnumValueDescriptor
+	valuesByNum    sortedValues
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
+	enumName := merge(enclosing, ed.GetName())
+	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
+	for _, ev := range ed.GetValue() {
+		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
+		symbols[n] = evd
+		ret.values = append(ret.values, evd)
+	}
+	if len(ret.values) > 0 {
+		ret.valuesByNum = make(sortedValues, len(ret.values))
+		copy(ret.valuesByNum, ret.values)
+		sort.Stable(ret.valuesByNum)
+	}
+	return ret, enumName
+}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+	return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+	return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+	sv[i], sv[j] = sv[j], sv[i]
+}
+
+func (ed *EnumDescriptor) resolve(path []int32) {
+	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Enum_valuesTag)
+	for i, evd := range ed.values {
+		evd.resolve(append(path, int32(i)))
+	}
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+	return ed.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+	return ed.fqn
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+	return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+	return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+	return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+	return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+	return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+	return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+	return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+	return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+	fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+		return vd
+	} else {
+		return nil
+	}
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+	index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+	if index < len(ed.valuesByNum) {
+		vd := ed.valuesByNum[index]
+		if vd.GetNumber() == num {
+			return vd
+		}
+	}
+	return nil
+}
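+
+// A minimal lookup sketch, assuming ed describes
+// `enum Status { UNKNOWN = 0; OK = 1; }` (hypothetical contents):
+//
+//	ed.FindValueByName("OK").GetNumber() // 1
+//	ed.FindValueByNumber(1).GetName()    // "OK"
+//	ed.FindValueByNumber(99)             // nil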
+
+// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
+type EnumValueDescriptor struct {
+	proto          *dpb.EnumValueDescriptorProto
+	parent         *EnumDescriptor
+	file           *FileDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
+	valName := merge(enclosing, evd.GetName())
+	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+	vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+	return vd.proto.GetName()
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+	return vd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes the fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+	return vd.fqn
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+	return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+	return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+	return vd.file
+}
+
+// GetOptions returns the enum value's options. Most usages will be more
+// interested in GetEnumValueOptions, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetOptions() proto.Message {
+	return vd.proto.GetOptions()
+}
+
+// GetEnumValueOptions returns the enum value's options.
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+	return vd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum value, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum value was defined and also contains comments associated with the enum
+// value definition.
+func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumValueDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) AsProto() proto.Message {
+	return vd.proto
+}
+
+// AsEnumValueDescriptorProto returns the underlying descriptor proto.
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+	return vd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (vd *EnumValueDescriptor) String() string {
+	return vd.proto.String()
+}
+
+// ServiceDescriptor describes an RPC service declared in a proto file.
+type ServiceDescriptor struct {
+	proto          *dpb.ServiceDescriptorProto
+	file           *FileDescriptor
+	methods        []*MethodDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
+	serviceName := merge(enclosing, sd.GetName())
+	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
+	for _, m := range sd.GetMethod() {
+		md, n := createMethodDescriptor(fd, ret, serviceName, m)
+		symbols[n] = md
+		ret.methods = append(ret.methods, md)
+	}
+	return ret, serviceName
+}
+
+func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
+	sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Service_methodsTag)
+	for i, md := range sd.methods {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+	return sd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+	return sd.fqn
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+	return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+	return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+	return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+	return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+	return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+	return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+	return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+	return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+	fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+	if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+	proto          *dpb.MethodDescriptorProto
+	parent         *ServiceDescriptor
+	file           *FileDescriptor
+	inType         *MessageDescriptor
+	outType        *MessageDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
+	// request and response types get resolved later
+	methodName := merge(enclosing, md.GetName())
+	return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+}
+
+func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
+		return err
+	} else {
+		md.inType = desc.(*MessageDescriptor)
+	}
+	if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+		return err
+	} else {
+		md.outType = desc.(*MessageDescriptor)
+	}
+	return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes the fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+	return md.proto.GetServerStreaming()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+	return md.proto.GetClientStreaming()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+	return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+	return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+	proto          *dpb.OneofDescriptorProto
+	parent         *MessageDescriptor
+	file           *FileDescriptor
+	choices        []*FieldDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
+	oneOfName := merge(enclosing, od.GetName())
+	ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+	for _, f := range parent.fields {
+		oi := f.proto.OneofIndex
+		if oi != nil && *oi == int32(index) {
+			f.oneOf = ret
+			ret.choices = append(ret.choices, f)
+		}
+	}
+	return ret, oneOfName
+}
+
+func (od *OneOfDescriptor) resolve(path []int32) {
+	od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+	return od.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes the fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+	return od.fqn
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+	return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+	return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+	return od.file
+}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+	return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+	return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+	return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+	return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+	return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+	return od.choices
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared.
+type scope func(string) Descriptor
+
+func fileScope(fd *FileDescriptor) scope {
+	// we search symbols in this file, but also symbols in other files that have
+	// the same package as this file or a "parent" package (in protobuf,
+	// packages are a hierarchy like C++ namespaces)
+	prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
+	return func(name string) Descriptor {
+		for _, prefix := range prefixes {
+			n := merge(prefix, name)
+			d := findSymbol(fd, n, false)
+			if d != nil {
+				return d
+			}
+		}
+		return nil
+	}
+}
+
+func messageScope(md *MessageDescriptor) scope {
+	return func(name string) Descriptor {
+		n := merge(md.fqn, name)
+		if d, ok := md.file.symbols[n]; ok {
+			return d
+		}
+		return nil
+	}
+}
+
+func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
+	if strings.HasPrefix(name, ".") {
+		// already fully-qualified
+		d := findSymbol(fd, name[1:], false)
+		if d != nil {
+			return d, nil
+		}
+	} else {
+		// unqualified, so we look in the enclosing (last) scope first and move
+		// towards outermost (first) scope, trying to resolve the symbol
+		for i := len(scopes) - 1; i >= 0; i-- {
+			d := scopes[i](name)
+			if d != nil {
+				return d, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
+}
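+
+// As an illustration of the search order implemented by resolve (the names
+// here are hypothetical): resolving the unqualified reference "Baz" from
+// within message "foo.Bar" (in package "foo") consults the innermost message
+// scope first (trying "foo.Bar.Baz") and then the file scope, which tries each
+// package prefix in turn ("foo.Baz", then "Baz"), per internal.CreatePrefixList.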
+
+func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
+	d := fd.symbols[name]
+	if d != nil {
+		return d
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
+	var deps []*FileDescriptor
+	if public {
+		deps = fd.publicDeps
+	} else {
+		deps = fd.deps
+	}
+	for _, dep := range deps {
+		d = findSymbol(dep, name, true)
+		if d != nil {
+			return d
+		}
+	}
+
+	return nil
+}
+
+func merge(a, b string) string {
+	if a == "" {
+		return b
+	} else {
+		return a + "." + b
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..cd7348e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,31 @@
+//+build appengine gopherjs purego
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: When use of unsafe is allowed, the implementation in descriptor_unsafe.go
+	// atomically maintains an index via atomic.LoadPointer/atomic.StorePointer.
+	// Without it, we skip the index and do a linear scan of fields each time.
+	for _, f := range md.fields {
+		jn := f.proto.GetJsonName()
+		if jn == "" {
+			jn = f.proto.GetName()
+		}
+		if jn == jsonName {
+			return f
+		}
+	}
+	return nil
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..19b808d
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,60 @@
+//+build !appengine,!gopherjs,!purego
+// NB: other environments where unsafe is inappropriate should use the "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{}            // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
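+//
+// For example (assuming a field declared as "foo_bar" whose descriptor carries
+// the default JSON name "fooBar"):
+//    f := md.FindFieldByJSONName("fooBar") // describes foo_bar, or nil if absent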
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: We don't want to eagerly index JSON names because many programs won't use it.
+	// So we want to do it lazily, but also make sure the result is thread-safe. So we
+	// atomically load/store the map as if it were a normal pointer. We don't use other
+	// mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+	// do this lazily because those types cannot be copied, and we'd rather not induce
+	// 'go vet' errors in programs that use descriptors and try to copy them.
+	// If multiple goroutines try to access the index at the same time, before it is
+	// built, they will all end up computing the index redundantly. Future reads of
+	// the index will use whatever was the "last one stored" by those racing goroutines.
+	// Since building the index is deterministic, this is fine: all indices computed
+	// will be the same.
+	addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+	jsonNames := atomic.LoadPointer(addrOfJsonNames)
+	var index map[string]*FieldDescriptor
+	if jsonNames == nil {
+		// slow path: compute the index
+		index = map[string]*FieldDescriptor{}
+		for _, f := range md.fields {
+			jn := f.proto.GetJsonName()
+			if jn == "" {
+				jn = f.proto.GetName()
+			}
+			index[jn] = f
+		}
+		atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+	} else {
+		*(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+	}
+	return index[jsonName]
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+	def := atomic.LoadPointer(addrOfDef)
+	if def != nil {
+		return *(*interface{})(def)
+	}
+	// slow path: compute the default, potentially involves decoding value
+	d := fd.determineDefault()
+	atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+	return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..1740dce
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,41 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through the rich descriptor's
+// methods.
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
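+//
+// For example, a minimal sketch (the file and message names here are purely
+// illustrative):
+//    fd, err := desc.LoadFileDescriptor("foo/bar.proto")
+//    if err != nil {
+//        // handle the error (e.g. the file was never registered with the runtime)
+//    } else {
+//        md, _ := fd.FindSymbol("foo.Bar").(*desc.MessageDescriptor)
+//        _ = md // md is nil if "foo.Bar" is not a message defined in that file
+//    }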
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..ab93032
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,313 @@
+package desc
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+var (
+	globalImportPathConf map[string]string
+	globalImportPathMu   sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This function panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This function also panics if an attempt is made to register the same import
+// path more than once.
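+//
+// For example (the paths here are illustrative, and mirror the example given
+// in the ImportResolver documentation below):
+//    desc.RegisterImportPath("bar.proto", "foo/bar.proto")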
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+func RegisterImportPath(registerPath, importPath string) {
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	desc := proto.FileDescriptor(registerPath)
+	if len(desc) == 0 {
+		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+	}
+	globalImportPathMu.Lock()
+	defer globalImportPathMu.Unlock()
+	if reg := globalImportPathConf[importPath]; reg != "" {
+		panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+	}
+	if globalImportPathConf == nil {
+		globalImportPathConf = map[string]string{}
+	}
+	globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
+func ResolveImport(importPath string) string {
+	importPath = clean(importPath)
+	globalImportPathMu.RLock()
+	defer globalImportPathMu.RUnlock()
+	reg := globalImportPathConf[importPath]
+	if reg == "" {
+		return importPath
+	}
+	return reg
+}
+
+// ImportResolver lets you work around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+//    import "foo/bar.proto";
+// However, when protoc is invoked, the command-line args look like so:
+//    protoc -Ifoo/ --go_out=foo/ bar.proto
+//    protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+// Because the paths given to protoc are just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+//    fd, err := desc.LoadFileDescriptor("baz.proto")
+//    // err will be non-nil, complaining that there is no such file
+//    // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+//    var r desc.ImportResolver
+//    r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//    fd, err := r.LoadFileDescriptor("baz.proto")
+//    // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path than how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+type ImportResolver struct {
+	children    map[string]*ImportResolver
+	importPaths map[string]string
+
+	// By default, an ImportResolver will fallback to consulting any paths
+	// registered via the top-level RegisterImportPath function. Setting this
+	// field to true will cause the ImportResolver to skip that fallback and
+	// only examine its own locally registered paths.
+	SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+	if r != nil {
+		res := r.resolveImport(clean(source), clean(importPath))
+		if res != "" {
+			return res
+		}
+		if r.SkipFallbackRules {
+			return importPath
+		}
+	}
+	return ResolveImport(importPath)
+}
+
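+// resolveImport walks the resolver's trie of source path segments. A
+// registration made for a more specific source context (a longer matching
+// prefix of the given source path) takes precedence over a registration made
+// for a less specific context or for no context at all.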
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+	if source == "" {
+		return r.importPaths[importPath]
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch != nil {
+		if reg := ch.resolveImport(cdr, importPath); reg != "" {
+			return reg
+		}
+	}
+	return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fallback to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+	r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must have the same
+// relative path as the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fallback to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
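+//
+// For example (the paths here are illustrative and mirror the example in the
+// ImportResolver documentation): to rewrite the import "foo/bar.proto" to the
+// registered path "bar.proto" only when linking files under the "fubar"
+// folder:
+//    r.RegisterImportPathFrom("bar.proto", "foo/bar.proto", "fubar")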
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+	importPath = clean(importPath)
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	registerPath = clean(registerPath)
+	if len(registerPath) == 0 {
+		panic("registered path cannot be empty")
+	}
+	r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+	if source == "" {
+		if r.importPaths == nil {
+			r.importPaths = map[string]string{}
+		} else if reg := r.importPaths[importPath]; reg != "" {
+			panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+		}
+		r.importPaths[importPath] = registerPath
+		return
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch == nil {
+		if r.children == nil {
+			r.children = map[string]*ImportResolver{}
+		}
+		ch = &ImportResolver{}
+		r.children[car] = ch
+	}
+	ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// descriptor for the given file.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+	return loadFileDescriptor(filePath, r)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(msgName, r)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(msg, r)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(msgType, r)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, r)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, r)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, r)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, r)
+}
+
+// CreateFileDescriptorsFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+	return createFileDescriptorsFromSet(fds, r)
+}
+
+const dotPrefix = "." + string(filepath.Separator)
+
+func clean(path string) string {
+	if path == "" {
+		return ""
+	}
+	path = filepath.Clean(path)
+	if path == "." {
+		return ""
+	}
+	return strings.TrimPrefix(path, dotPrefix)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..b4150b8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,107 @@
+package internal
+
+import (
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// SourceInfoMap is a map of paths in a descriptor to the corresponding source
+// code info.
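+//
+// Paths follow the conventions of SourceCodeInfo in descriptor.proto: for
+// example, the path []int32{4, 0} identifies the first message declared in a
+// file (4 being the tag number of the FileDescriptorProto.message_type field).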
+type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path. If there are
+// multiple locations for the same path, the first one is returned.
+func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+	v := m[asMapKey(path)]
+	if len(v) > 0 {
+		return v[0]
+	}
+	return nil
+}
+
+// GetAll returns all source code info for the given path.
+func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
+	return m[asMapKey(path)]
+}
+
+// Add stores the given source code info for the given path.
+func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+	m[asMapKey(path)] = append(m[asMapKey(path)], loc)
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+	k := asMapKey(path)
+	if _, ok := m[k]; ok {
+		return false
+	}
+	m[k] = []*dpb.SourceCodeInfo_Location{loc}
+	return true
+}
+
+func asMapKey(slice []int32) string {
+	// NB: arrays should be usable as map keys, but this does not
+	// work due to a bug: https://github.com/golang/go/issues/22605
+	//rv := reflect.ValueOf(slice)
+	//arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem())
+	//array := reflect.New(arrayType).Elem()
+	//reflect.Copy(array, rv)
+	//return array.Interface()
+
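+	// Instead, encode each int32 as 4 little-endian bytes and use the resulting
+	// string as the key; e.g. []int32{4, 0} becomes an 8-byte key.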
+	b := make([]byte, len(slice)*4)
+	j := 0
+	for _, s := range slice {
+		b[j] = byte(s)
+		b[j+1] = byte(s >> 8)
+		b[j+2] = byte(s >> 16)
+		b[j+3] = byte(s >> 24)
+		j += 4
+	}
+	return string(b)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+	res := SourceInfoMap{}
+	PopulateSourceInfoMap(fd, res)
+	return res
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor.
+func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
+		m.Add(l.Path, l)
+	}
+}
+
+// NB: This wonkiness allows desc.Descriptor implementations to implement an
+// interface that is only usable from this package, by embedding a
+// SourceInfoComputeFunc that implements the actual logic (which must live in
+// the desc package to avoid a dependency cycle).
+
+// SourceInfoComputer is a single method which will be invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+	recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+	f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This
+// is used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) {
+	c.recomputeSourceInfo()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..139c9cd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,270 @@
+package internal
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	// MaxTag is the maximum allowed tag number for a field.
+	MaxTag = 536870911 // 2^29 - 1
+
+	// SpecialReservedStart is the first tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedStart = 19000
+	// SpecialReservedEnd is the last tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedEnd = 19999
+
+	// NB: It would be nice to use constants from generated code instead of
+	// hard-coding these here. But code-gen does not emit these as constants
+	// anywhere. The only places they appear in generated code are struct tags
+	// on fields of the generated descriptor protos.
+
+	// File_packageTag is the tag number of the package element in a file
+	// descriptor proto.
+	File_packageTag = 2
+	// File_dependencyTag is the tag number of the dependencies element in a
+	// file descriptor proto.
+	File_dependencyTag = 3
+	// File_messagesTag is the tag number of the messages element in a file
+	// descriptor proto.
+	File_messagesTag = 4
+	// File_enumsTag is the tag number of the enums element in a file descriptor
+	// proto.
+	File_enumsTag = 5
+	// File_servicesTag is the tag number of the services element in a file
+	// descriptor proto.
+	File_servicesTag = 6
+	// File_extensionsTag is the tag number of the extensions element in a file
+	// descriptor proto.
+	File_extensionsTag = 7
+	// File_optionsTag is the tag number of the options element in a file
+	// descriptor proto.
+	File_optionsTag = 8
+	// File_syntaxTag is the tag number of the syntax element in a file
+	// descriptor proto.
+	File_syntaxTag = 12
+	// Message_nameTag is the tag number of the name element in a message
+	// descriptor proto.
+	Message_nameTag = 1
+	// Message_fieldsTag is the tag number of the fields element in a message
+	// descriptor proto.
+	Message_fieldsTag = 2
+	// Message_nestedMessagesTag is the tag number of the nested messages
+	// element in a message descriptor proto.
+	Message_nestedMessagesTag = 3
+	// Message_enumsTag is the tag number of the enums element in a message
+	// descriptor proto.
+	Message_enumsTag = 4
+	// Message_extensionRangeTag is the tag number of the extension ranges
+	// element in a message descriptor proto.
+	Message_extensionRangeTag = 5
+	// Message_extensionsTag is the tag number of the extensions element in a
+	// message descriptor proto.
+	Message_extensionsTag = 6
+	// Message_optionsTag is the tag number of the options element in a message
+	// descriptor proto.
+	Message_optionsTag = 7
+	// Message_oneOfsTag is the tag number of the one-ofs element in a message
+	// descriptor proto.
+	Message_oneOfsTag = 8
+	// Message_reservedRangeTag is the tag number of the reserved ranges element
+	// in a message descriptor proto.
+	Message_reservedRangeTag = 9
+	// Message_reservedNameTag is the tag number of the reserved names element
+	// in a message descriptor proto.
+	Message_reservedNameTag = 10
+	// ExtensionRange_startTag is the tag number of the start index in an
+	// extension range proto.
+	ExtensionRange_startTag = 1
+	// ExtensionRange_endTag is the tag number of the end index in an
+	// extension range proto.
+	ExtensionRange_endTag = 2
+	// ExtensionRange_optionsTag is the tag number of the options element in an
+	// extension range proto.
+	ExtensionRange_optionsTag = 3
+	// ReservedRange_startTag is the tag number of the start index in a reserved
+	// range proto.
+	ReservedRange_startTag = 1
+	// ReservedRange_endTag is the tag number of the end index in a reserved
+	// range proto.
+	ReservedRange_endTag = 2
+	// Field_nameTag is the tag number of the name element in a field descriptor
+	// proto.
+	Field_nameTag = 1
+	// Field_extendeeTag is the tag number of the extendee element in a field
+	// descriptor proto.
+	Field_extendeeTag = 2
+	// Field_numberTag is the tag number of the number element in a field
+	// descriptor proto.
+	Field_numberTag = 3
+	// Field_labelTag is the tag number of the label element in a field
+	// descriptor proto.
+	Field_labelTag = 4
+	// Field_typeTag is the tag number of the type element in a field descriptor
+	// proto.
+	Field_typeTag = 5
+	// Field_typeNameTag is the tag number of the type name element in a field
+	// descriptor proto.
+	Field_typeNameTag = 6
+	// Field_defaultTag is the tag number of the default value element in a
+	// field descriptor proto.
+	Field_defaultTag = 7
+	// Field_optionsTag is the tag number of the options element in a field
+	// descriptor proto.
+	Field_optionsTag = 8
+	// Field_jsonNameTag is the tag number of the JSON name element in a field
+	// descriptor proto.
+	Field_jsonNameTag = 10
+	// OneOf_nameTag is the tag number of the name element in a one-of
+	// descriptor proto.
+	OneOf_nameTag = 1
+	// OneOf_optionsTag is the tag number of the options element in a one-of
+	// descriptor proto.
+	OneOf_optionsTag = 2
+	// Enum_nameTag is the tag number of the name element in an enum descriptor
+	// proto.
+	Enum_nameTag = 1
+	// Enum_valuesTag is the tag number of the values element in an enum
+	// descriptor proto.
+	Enum_valuesTag = 2
+	// Enum_optionsTag is the tag number of the options element in an enum
+	// descriptor proto.
+	Enum_optionsTag = 3
+	// Enum_reservedRangeTag is the tag number of the reserved ranges element in
+	// an enum descriptor proto.
+	Enum_reservedRangeTag = 4
+	// Enum_reservedNameTag is the tag number of the reserved names element in
+	// an enum descriptor proto.
+	Enum_reservedNameTag = 5
+	// EnumVal_nameTag is the tag number of the name element in an enum value
+	// descriptor proto.
+	EnumVal_nameTag = 1
+	// EnumVal_numberTag is the tag number of the number element in an enum
+	// value descriptor proto.
+	EnumVal_numberTag = 2
+	// EnumVal_optionsTag is the tag number of the options element in an enum
+	// value descriptor proto.
+	EnumVal_optionsTag = 3
+	// Service_nameTag is the tag number of the name element in a service
+	// descriptor proto.
+	Service_nameTag = 1
+	// Service_methodsTag is the tag number of the methods element in a service
+	// descriptor proto.
+	Service_methodsTag = 2
+	// Service_optionsTag is the tag number of the options element in a service
+	// descriptor proto.
+	Service_optionsTag = 3
+	// Method_nameTag is the tag number of the name element in a method
+	// descriptor proto.
+	Method_nameTag = 1
+	// Method_inputTag is the tag number of the input type element in a method
+	// descriptor proto.
+	Method_inputTag = 2
+	// Method_outputTag is the tag number of the output type element in a method
+	// descriptor proto.
+	Method_outputTag = 3
+	// Method_optionsTag is the tag number of the options element in a method
+	// descriptor proto.
+	Method_optionsTag = 4
+	// Method_inputStreamTag is the tag number of the input stream flag in a
+	// method descriptor proto.
+	Method_inputStreamTag = 5
+	// Method_outputStreamTag is the tag number of the output stream flag in a
+	// method descriptor proto.
+	Method_outputStreamTag = 6
+
+	// UninterpretedOptionsTag is the tag number of the uninterpreted options
+	// element. All *Options messages use the same tag for the field that stores
+	// uninterpreted options.
+	UninterpretedOptionsTag = 999
+
+	// Uninterpreted_nameTag is the tag number of the name element in an
+	// uninterpreted options proto.
+	Uninterpreted_nameTag = 2
+	// Uninterpreted_identTag is the tag number of the identifier value in an
+	// uninterpreted options proto.
+	Uninterpreted_identTag = 3
+	// Uninterpreted_posIntTag is the tag number of the positive int value in an
+	// uninterpreted options proto.
+	Uninterpreted_posIntTag = 4
+	// Uninterpreted_negIntTag is the tag number of the negative int value in an
+	// uninterpreted options proto.
+	Uninterpreted_negIntTag = 5
+	// Uninterpreted_doubleTag is the tag number of the double value in an
+	// uninterpreted options proto.
+	Uninterpreted_doubleTag = 6
+	// Uninterpreted_stringTag is the tag number of the string value in an
+	// uninterpreted options proto.
+	Uninterpreted_stringTag = 7
+	// Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+	// uninterpreted options proto.
+	Uninterpreted_aggregateTag = 8
+	// UninterpretedName_nameTag is the tag number of the name element in an
+	// uninterpreted option name proto.
+	UninterpretedName_nameTag = 1
+)
+
+// JsonName returns the default JSON name for a field with the given name.
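+// For example, "foo_bar_baz" yields "fooBarBaz".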
+func JsonName(name string) string {
+	var js []rune
+	nextUpper := false
+	for i, r := range name {
+		if r == '_' {
+			nextUpper = true
+			continue
+		}
+		if i == 0 {
+			js = append(js, r)
+		} else if nextUpper {
+			nextUpper = false
+			js = append(js, unicode.ToUpper(r))
+		} else {
+			js = append(js, r)
+		}
+	}
+	return string(js)
+}
+
+// InitCap returns the given field name, but with the first letter capitalized.
+func InitCap(name string) string {
+	r, sz := utf8.DecodeRuneInString(name)
+	return string(unicode.ToUpper(r)) + name[sz:]
+}
+
+// CreatePrefixList returns a list of package prefixes to search when resolving
+// a symbol name. If the given package is blank, it returns only the empty
+// string. If the given package contains only one token, e.g. "foo", it returns
+// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
+// successively shorter prefixes of the package and then the empty string. For
+// example, for a package named "foo.bar.baz" it will return the following list:
+//   ["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+	if pkg == "" {
+		return []string{""}
+	}
+
+	numDots := 0
+	// one pass to pre-allocate the returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			numDots++
+		}
+	}
+	if numDots == 0 {
+		return []string{pkg, ""}
+	}
+
+	prefixes := make([]string, numDots+2)
+	// second pass to fill in returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			prefixes[numDots] = pkg[:i]
+			numDots--
+		}
+	}
+	prefixes[0] = pkg
+
+	return prefixes
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..4a05830
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,341 @@
+package desc
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/internal"
+)
+
+var (
+	cacheMu       sync.RWMutex
+	filesCache    = map[string]*FileDescriptor{}
+	messagesCache = map[string]*MessageDescriptor{}
+	enumCache     = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later.
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+	return loadFileDescriptor(file, nil)
+}
+
+func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := getFileFromCache(file)
+	if f != nil {
+		return f, nil
+	}
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadFileDescriptorLocked(file, r)
+}
+
+func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := filesCache[file]
+	if f != nil {
+		return f, nil
+	}
+	fd, err := internal.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err = toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(file, f)
+	return f, nil
+}
+
+func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
+	deps := make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, dep := range fd.GetDependency() {
+		resolvedDep := r.ResolveImport(fd.GetName(), dep)
+		var err error
+		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
+		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
+			// try original path
+			deps[i], err = loadFileDescriptorLocked(dep, r)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return CreateFileDescriptor(fd, deps...)
+}
+
+func getFileFromCache(file string) *FileDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return filesCache[file]
+}
+
+func putCacheLocked(filename string, fd *FileDescriptor) {
+	filesCache[filename] = fd
+	putMessageCacheLocked(fd.messages)
+}
+
+func putMessageCacheLocked(mds []*MessageDescriptor) {
+	for _, md := range mds {
+		messagesCache[md.fqn] = md
+		putMessageCacheLocked(md.nested)
+	}
+}
+
+// interface implemented by generated messages, which all have a Descriptor() method in
+// addition to the methods of proto.Message
+type protoMessage interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// LoadMessageDescriptor loads a descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(message, nil)
+}
+
+func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
+	m := getMessageFromCache(message)
+	if m != nil {
+		return m, nil
+	}
+
+	pt := proto.MessageType(message)
+	if pt == nil {
+		return nil, nil
+	}
+	msg, err := messageFromType(pt)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(message, msg, r)
+}
+
+// LoadMessageDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(messageType, nil)
+}
+
+func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
+	m, err := messageFromType(messageType)
+	if err != nil {
+		return nil, err
+	}
+	return loadMessageDescriptorForMessage(m, r)
+}
+
+// LoadMessageDescriptorForMessage loads a descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(message, nil)
+}
+
+func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
+	// efficiently handle dynamic messages
+	type descriptorable interface {
+		GetMessageDescriptor() *MessageDescriptor
+	}
+	if d, ok := message.(descriptorable); ok {
+		return d.GetMessageDescriptor(), nil
+	}
+
+	name := proto.MessageName(message)
+	if name == "" {
+		return nil, nil
+	}
+	m := getMessageFromCache(name)
+	if m != nil {
+		return m, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+}
+
+func messageFromType(mt reflect.Type) (protoMessage, error) {
+	if mt.Kind() != reflect.Ptr {
+		mt = reflect.PtrTo(mt)
+	}
+	m, ok := reflect.Zero(mt).Interface().(protoMessage)
+	if !ok {
+		return nil, fmt.Errorf("failed to create message from type: %v", mt)
+	}
+	return m, nil
+}
+
+func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
+	m := messagesCache[name]
+	if m != nil {
+		return m, nil
+	}
+
+	fdb, _ := message.Descriptor()
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(fd.GetName(), f)
+	return f.FindSymbol(name).(*MessageDescriptor), nil
+}
+
+func getMessageFromCache(message string) *MessageDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return messagesCache[message]
+}
+
+// interface implemented by all generated enums
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that it is accessible to reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
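+//
+// For illustration, an enum descriptor can instead be loaded from its generated
+// Go type (a minimal sketch; foopb.Status is a hypothetical generated enum):
+//
+//   ed, err := desc.LoadEnumDescriptorForType(reflect.TypeOf(foopb.Status(0)))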
+
+// LoadEnumDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, nil)
+}
+
+func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
+	// we cache descriptors using non-pointer type
+	if enumType.Kind() == reflect.Ptr {
+		enumType = enumType.Elem()
+	}
+	e := getEnumFromCache(enumType)
+	if e != nil {
+		return e, nil
+	}
+	enum, err := enumFromType(enumType)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+}
+
+// LoadEnumDescriptorForEnum loads a descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, nil)
+}
+
+func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	et := reflect.TypeOf(enum)
+	// we cache descriptors using non-pointer type
+	if et.Kind() == reflect.Ptr {
+		et = et.Elem()
+		enum = reflect.Zero(et).Interface().(protoEnum)
+	}
+	e := getEnumFromCache(et)
+	if e != nil {
+		return e, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(et, enum, r)
+}
+
+func enumFromType(et reflect.Type) (protoEnum, error) {
+	if et.Kind() != reflect.Int32 {
+		et = reflect.PtrTo(et)
+	}
+	e, ok := reflect.Zero(et).Interface().(protoEnum)
+	if !ok {
+		return nil, fmt.Errorf("failed to create enum from type: %v", et)
+	}
+	return e, nil
+}
+
+func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	e := enumCache[et]
+	if e != nil {
+		return e, nil
+	}
+
+	fdb, path := enum.EnumDescriptor()
+	name := fmt.Sprintf("%v", et)
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+	// see if we already have a cached "rich" descriptor
+	f, ok := filesCache[fd.GetName()]
+	if !ok {
+		f, err = toFileDescriptorLocked(fd, r)
+		if err != nil {
+			return nil, err
+		}
+		putCacheLocked(fd.GetName(), f)
+	}
+
+	ed := findEnum(f, path)
+	enumCache[et] = ed
+	return ed, nil
+}
+
+func getEnumFromCache(et reflect.Type) *EnumDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return enumCache[et]
+}
+
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+	if len(path) == 1 {
+		return fd.GetEnumTypes()[path[0]]
+	}
+	md := fd.GetMessageTypes()[path[0]]
+	for _, i := range path[1 : len(path)-1] {
+		md = md.GetNestedMessageTypes()[i]
+	}
+	return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, nil)
+}
+
+func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
+	file, err := loadFileDescriptor(ext.Filename, r)
+	if err != nil {
+		return nil, err
+	}
+	field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
+	// make sure descriptor agrees with attributes of the ExtensionDesc
+	if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
+		field.GetNumber() != ext.Field {
+		return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
+	}
+	return field, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..91fd672
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,185 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/jhump/protoreflect/codec"
+	"io"
+)
+
+// defaultDeterminism, if true, will mean that calls to Marshal will produce
+// deterministic output. This is used to make the output of proto.Marshal(...)
+// deterministic (since there is no way for that call to convey an intent for determinism).
+// **This is only used from tests.**
+var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+	var b codec.Buffer
+	b.SetDeterministic(defaultDeterminism)
+	if err := m.marshal(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+	codedBuf := codec.NewBuffer(b)
+	codedBuf.SetDeterministic(defaultDeterminism)
+	if err := m.marshal(codedBuf); err != nil {
+		return nil, err
+	}
+	return codedBuf.Bytes(), nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+	var b codec.Buffer
+	b.SetDeterministic(true)
+	if err := m.marshal(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic,
+// except instead of allocating a new byte slice to marshal into, it uses the
+// provided byte slice. The backing array for the returned byte slice *may* be
+// the same as the one that was passed in, but it's not guaranteed as a new
+// backing array will automatically be allocated if more bytes need to be written
+// than the provided buffer has capacity for.
+func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) {
+	codedBuf := codec.NewBuffer(b)
+	codedBuf.SetDeterministic(true)
+	if err := m.marshal(codedBuf); err != nil {
+		return nil, err
+	}
+	return codedBuf.Bytes(), nil
+}
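+
+// For illustration, a minimal usage sketch of the exported methods above
+// (dm is assumed to be an already-populated *Message):
+//
+//   data, err := dm.Marshal()             // standard binary format; map order unspecified
+//   det, err := dm.MarshalDeterministic() // same format, with map keys sorted
+//   err = dm.Unmarshal(data)              // resets dm, then de-serializes data into it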
+
+func (m *Message) marshal(b *codec.Buffer) error {
+	if err := m.marshalKnownFields(b); err != nil {
+		return err
+	}
+	return m.marshalUnknownFields(b)
+}
+
+func (m *Message) marshalKnownFields(b *codec.Buffer) error {
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		val := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd == nil {
+			panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
+		}
+		if err := b.EncodeFieldValue(fd, val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *Message) marshalUnknownFields(b *codec.Buffer) error {
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		sl := m.unknownFields[itag]
+		for _, u := range sl {
+			if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil {
+				return err
+			}
+			switch u.Encoding {
+			case proto.WireBytes:
+				if err := b.EncodeRawBytes(u.Contents); err != nil {
+					return err
+				}
+			case proto.WireStartGroup:
+				_, _ = b.Write(u.Contents)
+				if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
+					return err
+				}
+			case proto.WireFixed32:
+				if err := b.EncodeFixed32(u.Value); err != nil {
+					return err
+				}
+			case proto.WireFixed64:
+				if err := b.EncodeFixed64(u.Value); err != nil {
+					return err
+				}
+			case proto.WireVarint:
+				if err := b.EncodeVarint(u.Value); err != nil {
+					return err
+				}
+			default:
+				return codec.ErrBadWireType
+			}
+		}
+	}
+	return nil
+}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMerge(b); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+	return m.unmarshal(codec.NewBuffer(b), false)
+}
+
+func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+	for !buf.EOF() {
+		fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
+		if err != nil {
+			if err == codec.ErrWireTypeEndGroup {
+				if isGroup {
+					// finished parsing group
+					return nil
+				}
+				return codec.ErrBadWireType
+			}
+			return err
+		}
+
+		if fd == nil {
+			if m.unknownFields == nil {
+				m.unknownFields = map[int32][]UnknownField{}
+			}
+			uv := val.(codec.UnknownField)
+			u := UnknownField{
+				Encoding: uv.Encoding,
+				Value:    uv.Value,
+				Contents: uv.Contents,
+			}
+			m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u)
+		} else if err := mergeField(m, fd, val); err != nil {
+			return err
+		}
+	}
+	if isGroup {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..c329fcd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,163 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
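+//
+// For illustration (a minimal sketch; dm is assumed to be a *Message whose
+// descriptor has a "name" field):
+//
+//   v := dm.GetFieldByName("name")         // panics if "name" is not a known field
+//   v, err := dm.TryGetFieldByName("name") // returns an error instead of panicking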
+//
+//
+// Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned a value of any type that is defined as a string. Enum fields
+// require int32 values (or values of any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. The same goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of Go's int and uint values is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enum fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
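+//
+// For illustration, a minimal sketch (md and the field names are hypothetical):
+//
+//   dm := dynamic.NewMessage(md)
+//   dm.SetFieldByName("name", "Ana")           // string field
+//   dm.SetFieldByName("ids", []int64{1, 2, 3}) // repeated int64 field
+//   vals := dm.GetFieldByName("ids").([]interface{}) // copy of the elements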
+//
+//
+// Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not set up with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+//   1. A dynamic message is created with a descriptor, A, and then
+//      de-serialized from a stream of bytes. The stream includes an
+//      unrecognized tag T. The message will include tag T in its unrecognized
+//      field set.
+//   2. Another call site retrieves a newer descriptor, A', which includes a
+//      newly added field with tag T.
+//   3. That other call site then uses a FieldDescriptor to access the value of
+//      the new field. This will cause the dynamic message to parse the bytes
+//      for the unknown tag T and store them as a known field.
+//   4. Subsequent operations for tag T, including setting the field using only
+//      tag number or de-serializing a stream that includes tag T, will operate
+//      as if that tag were part of the original descriptor, A.
+//
+//
+// Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
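+//
+// For illustration, a minimal sketch of this inter-op (foopb.Person is a
+// hypothetical generated type and dm is a dynamic message whose descriptor is
+// compatible with it):
+//
+//   data, err := proto.Marshal(dm) // dynamic messages work with proto.Marshal
+//   var p foopb.Person
+//   err = dm.ConvertTo(&p)         // copy the dynamic message's contents into p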
+//
+//
+// Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extension fields
+// (for proto2 messages).
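+//
+// For illustration, a minimal sketch (md is a message descriptor and er is an
+// ExtensionRegistry populated elsewhere):
+//
+//   dm := dynamic.NewMessageWithExtensionRegistry(md, er)
+//   err := dm.Unmarshal(data) // extension fields found in data are parsed via er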
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..3f19d6b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2688 @@
+package dynamic
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+	md            *desc.MessageDescriptor
+	er            *ExtensionRegistry
+	mf            *MessageFactory
+	extraFields   map[int32]*desc.FieldDescriptor
+	values        map[int32]interface{}
+	unknownFields map[int32][]UnknownField
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+	var er *ExtensionRegistry
+	if mf != nil {
+		er = mf.er
+	}
+	return &Message{
+		md: md,
+		mf: mf,
+		er: er,
+	}
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+	return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return AsDynamicMessageWithMessageFactory(msg, mf)
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+	if dm, ok := msg.(*Message); ok {
+		return dm, nil
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	err = dm.mergeFrom(msg)
+	if err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+// GetMessageDescriptor returns a descriptor for this message's type.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+	return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+	if len(m.extraFields) == 0 {
+		return m.md.GetFields()
+	}
+	flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields))
+	copy(flds, m.md.GetFields())
+	for _, fld := range m.extraFields {
+		if !fld.IsExtension() {
+			flds = append(flds, fld)
+		}
+	}
+	return flds
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+	if !m.md.IsExtendable() {
+		return nil
+	}
+	exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+	for _, fld := range m.extraFields {
+		if fld.IsExtension() {
+			exts = append(exts, fld)
+		}
+	}
+	return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+	flds := make([]int32, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		flds = append(flds, tag)
+	}
+	return flds
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc.
+func (m *Message) Descriptor() ([]byte, []int) {
+	// get encoded file descriptor
+	b, err := proto.Marshal(m.md.GetFile().AsProto())
+	if err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	var zippedBytes bytes.Buffer
+	w := gzip.NewWriter(&zippedBytes)
+	if _, err := w.Write(b); err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	if err := w.Close(); err != nil {
+		panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+
+	// and path to message
+	path := []int{}
+	var d desc.Descriptor
+	name := m.md.GetFullyQualifiedName()
+	for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+		found := false
+		switch d := d.(type) {
+		case (*desc.FileDescriptor):
+			for i, md := range d.GetMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		case (*desc.MessageDescriptor):
+			for i, md := range d.GetNestedMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		}
+		if !found {
+			panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+		}
+	}
+	// reverse the path
+	i := 0
+	j := len(path) - 1
+	for i < j {
+		path[i], path[j] = path[j], path[i]
+		i++
+		j--
+	}
+
+	return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+	return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+	fd := m.md.FindFieldByNumber(tagNumber)
+	if fd != nil {
+		return fd
+	}
+	fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber)
+	if fd != nil {
+		return fd
+	}
+	return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
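+//
+// For illustration (the extension name shown is hypothetical):
+//
+//   fd := m.FindFieldDescriptorByName("(foo.bar.my_extension)")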
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+			return fd
+		}
+	}
+
+	return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByJSONName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+			return fd
+		}
+	}
+
+	// try non-JSON names
+	return m.FindFieldDescriptorByName(name)
+}
+
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+	return checkField(fd, m.md)
+}
+
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+	if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+		return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+	}
+	if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+		return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+	}
+	return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+	if v, err := m.TryGetField(fd); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to look up value names for those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values are
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For fields whose type
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on whether *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+	if v, err := m.TryGetFieldByName(name); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+	if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getField(fd)
+}
+
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+	return m.doGetField(fd, false)
+}
+
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+	res := m.values[fd.GetNumber()]
+	if res == nil {
+		var err error
+		if res, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		}
+		if res == nil {
+			if nilIfAbsent {
+				return nil, nil
+			} else {
+				def := fd.GetDefaultValue()
+				if def != nil {
+					return def, nil
+				}
+				// GetDefaultValue only returns nil for message types
+				md := fd.GetMessageType()
+				if md.IsProto3() {
+					// try to return a proper nil pointer
+					msgType := proto.MessageType(md.GetFullyQualifiedName())
+					if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+						return reflect.Zero(msgType).Interface().(proto.Message), nil
+					}
+					// fallback to nil dynamic message pointer
+					return (*Message)(nil), nil
+				} else {
+					// for proto2, return default instance of message
+					return m.mf.NewMessage(md), nil
+				}
+			}
+		}
+	}
+	rt := reflect.TypeOf(res)
+	if rt.Kind() == reflect.Map {
+		// make defensive copies to prevent caller from storing illegal keys and values
+		m := res.(map[interface{}]interface{})
+		res := map[interface{}]interface{}{}
+		for k, v := range m {
+			res[k] = v
+		}
+		return res, nil
+	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+		// make defensive copies to prevent caller from storing illegal elements
+		sl := res.([]interface{})
+		res := make([]interface{}, len(sl))
+		copy(res, sl)
+		return res, nil
+	}
+	return res, nil
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero value for a field is intentionally indistinguishable from absence).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+	if err := m.checkField(fd); err != nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+	if _, ok := m.values[int32(tagNumber)]; ok {
+		return true
+	}
+	_, ok := m.unknownFields[int32(tagNumber)]
+	return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TrySetField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+//  1. If a numeric type is provided that can be converted *without loss or
+//     overflow*, it is accepted. This allows for setting int64 fields using int
+//     or int32 values. Similarly for uint64 with uint and uint32 values and for
+//     float64 fields with float32 values.
+//  2. The value can be a named type, as long as its underlying type is correct.
+//  3. Map and repeated fields can be set using any kind of concrete map or
+//     slice type, as long as the values within are all of the correct type. So
+//     a field defined as a 'map<string, int32>' can be set using a
+//     map[string]int32, a map[string]interface{}, or even a
+//     map[interface{}]interface{}.
+//  4. Finally, dynamic code that chooses not to treat maps as a special case
+//     will find that it can set map fields using a slice where each element is
+//     a message that matches the implicit map-entry message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has an
+// unknown value for the field's tag, it is cleared and replaced by the given
+// known value.
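+//
+// For illustration, a minimal sketch of the concessions above (fd is assumed
+// to describe an int64 field of this message's type):
+//
+//   err := m.TrySetField(fd, int32(42)) // ok: int32 is widened to int64
+//   err = m.TrySetField(fd, "42")       // error: wrong type for the field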
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+	if err := m.TrySetFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parentheses or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+	m.internalSetField(fd, val)
+	return nil
+}
+
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+	if fd.IsRepeated() {
+		// Unset fields and zero-length fields are indistinguishable, in both
+		// proto2 and proto3 syntax
+		if reflect.ValueOf(val).Len() == 0 {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
+		// proto3 considers fields that are set to their zero value as unset
+		// (we already handled repeated fields above)
+		var equal bool
+		if b, ok := val.([]byte); ok {
+			// can't compare slices, so we have to special-case []byte values
+			equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
+		} else {
+			defVal := fd.GetDefaultValue()
+			equal = defVal == val
+			if !equal && defVal == nil {
+				// above just checks if value is the nil interface,
+				// but we should also test if the given value is a
+				// nil pointer
+				rv := reflect.ValueOf(val)
+				if rv.Kind() == reflect.Ptr && rv.IsNil() {
+					equal = true
+				}
+			}
+		}
+		if equal {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	}
+	if m.values == nil {
+		m.values = map[int32]interface{}{}
+	}
+	m.values[fd.GetNumber()] = val
+	// if this field is part of a one-of, make sure all other one-of choices are cleared
+	od := fd.GetOneOf()
+	if od != nil {
+		for _, other := range od.GetChoices() {
+			if other.GetNumber() != fd.GetNumber() {
+				delete(m.values, other.GetNumber())
+			}
+		}
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+	if m.extraFields == nil {
+		m.extraFields = map[int32]*desc.FieldDescriptor{}
+	}
+	m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+	if err := m.TryClearField(fd); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+	if err := m.TryClearFieldByName(name); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+	if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+	// clear value
+	if m.values != nil {
+		delete(m.values, fd.GetNumber())
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+	if fd, val, err := m.TryGetOneOfField(od); err != nil {
+		panic(err.Error())
+	} else {
+		return fd, val
+	}
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return a nil field descriptor and a nil value.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
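+//
+// For illustration (od is assumed to be a one-of of this message's type):
+//
+//   fd, val, err := m.TryGetOneOfField(od)
+//   if err == nil && fd != nil {
+//       // fd identifies which choice is set; val holds its value
+//   }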
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		val, err := m.doGetField(fd, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		if val != nil {
+			return fd, val, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+	if err := m.TryClearOneOfField(od); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		m.clearField(fd)
+	}
+	return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+	if v, err := m.TryGetMapField(fd, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if !fd.IsMap() {
+		return nil, FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return nil, err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if mp == nil {
+			return nil, nil
+		}
+	}
+	return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed entries will then be supplied to the
+// given function. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		var err error
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	for k, v := range mp.(map[interface{}]interface{}) {
+		if !fn(k, v) {
+			break
+		}
+	}
+	return nil
+}
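+
+// exampleCountMapEntries is a minimal usage sketch for the iteration helper
+// above: it counts the entries of a map field by returning true from the
+// callback so that every entry is visited.
+func exampleCountMapEntries(m *Message, fd *desc.FieldDescriptor) (int, error) {
+	n := 0
+	err := m.TryForEachMapFieldEntry(fd, func(key, val interface{}) bool {
+		n++
+		return true // keep iterating; returning false would stop early
+	})
+	return n, err
+}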
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+	if err := m.TryPutMapField(fd, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is cleared and replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return err
+	}
+	vfd := fd.GetMessageType().GetFields()[1]
+	vi, err := validElementFieldValue(vfd, val)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+			return nil
+		}
+	}
+	mp.(map[interface{}]interface{})[ki] = vi
+	return nil
+}
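+
+// examplePutAndGetMapEntry is a minimal usage sketch for the map accessors: it
+// writes one entry and reads it back. The field name "labels" and its assumed
+// map<string, int32> shape are purely illustrative.
+func examplePutAndGetMapEntry(m *Message) (interface{}, error) {
+	if err := m.TryPutMapFieldByName("labels", "env", int32(1)); err != nil {
+		return nil, err
+	}
+	// TryGetMapFieldByName returns nil, nil when the key is absent; here the
+	// entry was just written, so its value is returned.
+	return m.TryGetMapFieldByName("labels", "env")
+}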
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+	if err := m.TryRemoveMapField(fd, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and any value for the given key
+// is removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+	if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+	if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.removeMapField(fd, key)
+}
+
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	res := mp.(map[interface{}]interface{})
+	delete(res, ki)
+	if len(res) == 0 {
+		delete(m.values, fd.GetNumber())
+	}
+	return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+	l, err := m.TryFieldLength(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if err := m.checkField(fd); err != nil {
+		return 0, err
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+	l, err := m.TryFieldLengthByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return 0, UnknownFieldNameError
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+	l, err := m.TryFieldLengthByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return 0, UnknownTagNumberError
+	}
+	return m.fieldLength(fd)
+}
+
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if !fd.IsRepeated() {
+		return 0, FieldIsNotRepeatedError
+	}
+	val := m.values[fd.GetNumber()]
+	if val == nil {
+		var err error
+		if val, err = m.parseUnknownField(fd); err != nil {
+			return 0, err
+		} else if val == nil {
+			return 0, nil
+		}
+	}
+	if sl, ok := val.([]interface{}); ok {
+		return len(sl), nil
+	} else if mp, ok := val.(map[interface{}]interface{}); ok {
+		return len(mp), nil
+	}
+	return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+	if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return nil, FieldIsNotRepeatedError
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		var err error
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if sl == nil {
+			return nil, IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return nil, IndexOutOfRangeError
+	}
+	return res[index], nil
+}
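+
+// exampleRepeatedElements is a minimal usage sketch that combines
+// TryFieldLength with TryGetRepeatedField to copy a (non-map) repeated field's
+// elements into a plain slice.
+func exampleRepeatedElements(m *Message, fd *desc.FieldDescriptor) ([]interface{}, error) {
+	n, err := m.TryFieldLength(fd)
+	if err != nil {
+		return nil, err
+	}
+	out := make([]interface{}, 0, n)
+	for i := 0; i < n; i++ {
+		v, err := m.TryGetRepeatedField(fd, i)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, v)
+	}
+	return out, nil
+}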
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TryAddRepeatedField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+	if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val)
+	if err != nil {
+		return err
+	}
+
+	if fd.IsMap() {
+		// We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
+		// adding entries one at a time (as if the field were a normal repeated field).
+		msg := val.(proto.Message)
+		dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+		if err != nil {
+			return err
+		}
+		k, err := dm.TryGetFieldByNumber(1)
+		if err != nil {
+			return err
+		}
+		v, err := dm.TryGetFieldByNumber(2)
+		if err != nil {
+			return err
+		}
+		return m.putMapField(fd, k, v)
+	}
+
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			sl = []interface{}{}
+		}
+	}
+	res := sl.([]interface{})
+	res = append(res, val)
+	m.internalSetField(fd, res)
+	return nil
+}
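+
+// exampleAppendAndReadBack is a minimal usage sketch for the append helpers:
+// it adds one element to a repeated field and reads it back by index. The
+// field name "tags" (assumed to be a repeated string field) is illustrative.
+func exampleAppendAndReadBack(m *Message) (interface{}, error) {
+	if err := m.TryAddRepeatedFieldByName("tags", "alpha"); err != nil {
+		return nil, err
+	}
+	// Index 0 is in range because at least one element now exists.
+	return m.TryGetRepeatedFieldByName("tags", 0)
+}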
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// TrySetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+	if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val)
+	if err != nil {
+		return err
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			return IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return IndexOutOfRangeError
+	}
+	res[index] = val
+	return nil
+}
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+	if u, ok := m.unknownFields[tagNumber]; ok {
+		return u
+	} else {
+		return nil
+	}
+}
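+
+// exampleHasUnknownTag is a minimal usage sketch: it reports whether any raw,
+// unrecognized data was captured for the given tag number. Each UnknownField
+// records the wire encoding along with either Contents or Value.
+func exampleHasUnknownTag(m *Message, tagNumber int32) bool {
+	return len(m.GetUnknownField(tagNumber)) > 0
+}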
+
+func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
+	unks, ok := m.unknownFields[fd.GetNumber()]
+	if !ok {
+		return nil, nil
+	}
+	var v interface{}
+	var sl []interface{}
+	var mp map[interface{}]interface{}
+	if fd.IsMap() {
+		mp = map[interface{}]interface{}{}
+	}
+	var err error
+	for _, unk := range unks {
+		var val interface{}
+		if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
+			val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf)
+		} else {
+			val, err = codec.DecodeScalarField(fd, unk.Value)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			newEntry := val.(*Message)
+			kk, err := newEntry.TryGetFieldByNumber(1)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := newEntry.TryGetFieldByNumber(2)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+			v = mp
+		} else if fd.IsRepeated() {
+			t := reflect.TypeOf(val)
+			if t.Kind() == reflect.Slice && t != typeOfBytes {
+				// append slices if we unmarshalled a packed repeated field
+				newVals := val.([]interface{})
+				sl = append(sl, newVals...)
+			} else {
+				sl = append(sl, val)
+			}
+			v = sl
+		} else {
+			v = val
+		}
+	}
+	m.internalSetField(fd, v)
+	return v, nil
+}
+
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	return validFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	if fd.IsMap() && val.Kind() == reflect.Map {
+		return validFieldValueForMapField(fd, val)
+	}
+
+	if fd.IsRepeated() { // this will also catch map fields where given value was not a map
+		if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
+			if fd.IsMap() {
+				return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
+			} else {
+				return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
+			}
+		}
+
+		if fd.IsMap() {
+			// value should be a slice of entry messages that we need to convert into a map[interface{}]interface{}
+			m := map[interface{}]interface{}{}
+			for i := 0; i < val.Len(); i++ {
+				e, err := validElementFieldValue(fd, val.Index(i).Interface())
+				if err != nil {
+					return nil, err
+				}
+				msg := e.(proto.Message)
+				dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
+				if err != nil {
+					return nil, err
+				}
+				k, err := dm.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				v, err := dm.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = v
+			}
+			return m, nil
+		}
+
+		// make a defensive copy while checking contents (also converts to []interface{})
+		s := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			ev := val.Index(i)
+			if ev.Kind() == reflect.Interface {
+				// unwrap it
+				ev = reflect.ValueOf(ev.Interface())
+			}
+			e, err := validElementFieldValueForRv(fd, ev)
+			if err != nil {
+				return nil, err
+			}
+			s[i] = e
+		}
+
+		return s, nil
+	}
+
+	return validElementFieldValueForRv(fd, val)
+}
+
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+	if dm, ok := m.(*Message); ok {
+		return dm, nil
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	if err := dm.mergeFrom(m); err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	return validElementFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	t := fd.GetType()
+	if !val.IsValid() {
+		return nil, typeError(fd, nil)
+	}
+
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return toInt32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return toInt64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32:
+		return toUint32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return toUint64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return toFloat32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return toFloat64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return toBool(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return toBytes(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return toString(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+		m, err := asMessage(val, fd.GetFullyQualifiedName())
+		// check that message is correct type
+		if err != nil {
+			return nil, err
+		}
+		var msgType string
+		if dm, ok := m.(*Message); ok {
+			msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
+		} else {
+			msgType = proto.MessageName(m)
+		}
+		if msgType != fd.GetMessageType().GetFullyQualifiedName() {
+			return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
+		}
+		return m, nil
+
+	default:
+		return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+	if v.Kind() == reflect.Int32 {
+		return int32(v.Int()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+	if v.Kind() == reflect.Uint32 {
+		return uint32(v.Uint()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+	if v.Kind() == reflect.Float32 {
+		return float32(v.Float()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+	if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 {
+		return v.Int(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+	if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 {
+		return v.Uint(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+	if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 {
+		return v.Float(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+	if v.Kind() == reflect.Bool {
+		return v.Bool(), nil
+	}
+	return false, typeError(fd, v.Type())
+}
+
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+		return v.Bytes(), nil
+	}
+	return nil, typeError(fd, v.Type())
+}
+
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+	if v.Kind() == reflect.String {
+		return v.String(), nil
+	}
+	return "", typeError(fd, v.Type())
+}
+
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+	return fmt.Errorf(
+		"%s field %s is not compatible with value of type %v",
+		getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+func getTypeString(fd *desc.FieldDescriptor) string {
+	return strings.ToLower(fd.GetType().String())
+}
+
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+	t := v.Type()
+	// we need a pointer to a struct that implements proto.Message
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+		return nil, fmt.Errorf("message field %s requires a proto.Message value; value of type %v is not compatible", fieldName, v.Type())
+	}
+	return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+	for k := range m.values {
+		delete(m.values, k)
+	}
+	for k := range m.unknownFields {
+		delete(m.unknownFields, k)
+	}
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+	b, err := m.MarshalText()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+	}
+	return string(b)
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+//   target.Reset()
+//   m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+//   m.Reset()
+//   m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	m.Reset()
+	return m.mergeFrom(target)
+}
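+
+// exampleRoundTrip is a minimal usage sketch for the conversion helpers: it
+// copies a dynamic message into another message and back. The target is
+// assumed to be a generated struct whose type matches m's message descriptor.
+func exampleRoundTrip(m *Message, target proto.Message) error {
+	// ConvertTo resets target and then merges m's fields into it.
+	if err := m.ConvertTo(target); err != nil {
+		return err
+	}
+	// ConvertFrom resets m and then merges target's fields back into it.
+	return m.ConvertFrom(target)
+}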
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+	if err := m.checkType(source); err != nil {
+		return err
+	}
+	return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic messages are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and then calls Merge.
+func (m *Message) Merge(source proto.Message) {
+	if m.md == nil {
+		// To support proto.Clone, initialize the descriptor from the source.
+		if dm, ok := source.(*Message); ok {
+			m.md = dm.md
+			// also make sure the clone uses the same message factory and
+			// extensions and also knows about the same extra fields (if any)
+			m.mf = dm.mf
+			m.er = dm.er
+			m.extraFields = dm.extraFields
+		} else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+			panic(err.Error())
+		} else {
+			m.md = md
+		}
+	}
+
+	if err := m.MergeFrom(source); err != nil {
+		panic(err.Error())
+	}
+}
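+
+// exampleStdMerge is a minimal usage sketch: because Merge satisfies the
+// proto.Merger interface, the standard proto.Merge helper can combine two
+// dynamic messages of the same type (it panics if the types differ).
+func exampleStdMerge(dst, src *Message) {
+	proto.Merge(dst, src)
+}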
+
+func (m *Message) checkType(target proto.Message) error {
+	if dm, ok := target.(*Message); ok {
+		if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+			return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+		}
+		return nil
+	}
+
+	msgName := proto.MessageName(target)
+	if msgName != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+	}
+	return nil
+}
+
+func (m *Message) mergeInto(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		return dm.mergeFrom(m)
+	}
+
+	target := reflect.ValueOf(pm)
+	if target.Kind() == reflect.Ptr {
+		target = target.Elem()
+	}
+
+	// track tags for which the dynamic message has data but which the given
+	// message doesn't know about
+	u := target.FieldByName("XXX_unrecognized")
+	var unknownTags map[int32]struct{}
+	if u.IsValid() && u.Type() == typeOfBytes {
+		unknownTags = map[int32]struct{}{}
+		for tag := range m.values {
+			unknownTags[tag] = struct{}{}
+		}
+	}
+
+	// check that we can successfully do the merge
+	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	for _, prop := range structProps.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		f := target.FieldByName(prop.Name)
+		ft := f.Type()
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// check one-of fields
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		stf, ok := oop.Type.Elem().FieldByName(prop.Name)
+		if !ok {
+			return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
+		}
+		ft := stf.Type
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// and check extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		ft := reflect.TypeOf(ext.ExtensionType)
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+
+	// now actually perform the merge
+	for _, prop := range structProps.Prop {
+		v, ok := m.values[int32(prop.Tag)]
+		if !ok {
+			continue
+		}
+		f := target.FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+			return err
+		}
+	}
+	// merge one-ofs
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		oov := reflect.New(oop.Type.Elem())
+		f := oov.Elem().FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+			return err
+		}
+		target.Field(oop.Field).Set(oov)
+	}
+	// merge extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
+		if err := mergeVal(reflect.ValueOf(v), e); err != nil {
+			return err
+		}
+		if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
+			// shouldn't happen since we already checked that the extension type was compatible above
+			return err
+		}
+	}
+
+	// if we have fields that the given message doesn't know about, add to its unknown fields
+	if len(unknownTags) > 0 {
+		ub := u.Interface().([]byte)
+		var b codec.Buffer
+		b.SetDeterministic(defaultDeterminism)
+		for tag := range unknownTags {
+			fd := m.FindFieldDescriptor(tag)
+			if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+				return err
+			}
+		}
+		ub = append(ub, b.Bytes()...)
+		u.Set(reflect.ValueOf(ub))
+	}
+
+	// finally, convey unknown fields into the given message by letting it unmarshal them
+	// (this will append to its unknown fields if not known; if somehow the given message recognizes
+	// a field even though the dynamic message did not, it will get correctly unmarshalled)
+	if unknownTags != nil && len(m.unknownFields) > 0 {
+		var b codec.Buffer
+		_ = m.marshalUnknownFields(&b)
+		_ = proto.UnmarshalMerge(b.Bytes(), pm)
+	}
+
+	return nil
+}
+
+func canConvert(src reflect.Value, target reflect.Type) bool {
+	if src.Kind() == reflect.Interface {
+		src = reflect.ValueOf(src.Interface())
+	}
+	srcType := src.Type()
+	// we allow convertible types instead of requiring exact types so that calling
+	// code can, for example, assign an enum constant to an enum field. In that case,
+	// one type is the enum type (a sub-type of int32) and the other may be the int32
+	// type. So we automatically do the conversion in that case.
+	if srcType.ConvertibleTo(target) {
+		return true
+	} else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
+		return true
+	} else if target.Kind() == reflect.Slice {
+		if srcType.Kind() != reflect.Slice {
+			return false
+		}
+		et := target.Elem()
+		for i := 0; i < src.Len(); i++ {
+			if !canConvert(src.Index(i), et) {
+				return false
+			}
+		}
+		return true
+	} else if target.Kind() == reflect.Map {
+		if srcType.Kind() != reflect.Map {
+			return false
+		}
+		return canConvertMap(src, target)
+	} else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
+		z := reflect.Zero(target).Interface()
+		msgType := proto.MessageName(z.(proto.Message))
+		return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		return false
+	}
+}
+
+func mergeVal(src, target reflect.Value) error {
+	if src.Kind() == reflect.Interface && !src.IsNil() {
+		src = src.Elem()
+	}
+	srcType := src.Type()
+	targetType := target.Type()
+	if srcType.ConvertibleTo(targetType) {
+		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
+			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
+		} else {
+			target.Set(src.Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
+		if !src.CanAddr() {
+			target.Set(reflect.New(targetType.Elem()))
+			target.Elem().Set(src.Convert(targetType.Elem()))
+		} else {
+			target.Set(src.Addr().Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Slice {
+		l := target.Len()
+		newL := l + src.Len()
+		if target.Cap() < newL {
+			// expand capacity of the slice and copy
+			newSl := reflect.MakeSlice(targetType, newL, newL)
+			for i := 0; i < target.Len(); i++ {
+				newSl.Index(i).Set(target.Index(i))
+			}
+			target.Set(newSl)
+		} else {
+			target.SetLen(newL)
+		}
+		for i := 0; i < src.Len(); i++ {
+			dest := target.Index(l + i)
+			if dest.Kind() == reflect.Ptr {
+				dest.Set(reflect.New(dest.Type().Elem()))
+			}
+			if err := mergeVal(src.Index(i), dest); err != nil {
+				return err
+			}
+		}
+	} else if targetType.Kind() == reflect.Map {
+		return mergeMapVal(src, target, targetType)
+	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
+		dm := src.Interface().(*Message)
+		if target.IsNil() {
+			target.Set(reflect.New(targetType.Elem()))
+		}
+		m := target.Interface().(proto.Message)
+		if err := dm.mergeInto(m); err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
+	}
+	return nil
+}
+
+func (m *Message) mergeFrom(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		// if given message is also a dynamic message, we merge differently
+		for tag, v := range dm.values {
+			fd := m.FindFieldDescriptor(tag)
+			if fd == nil {
+				fd = dm.FindFieldDescriptor(tag)
+			}
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	pmrv := reflect.ValueOf(pm)
+	if pmrv.IsNil() {
+		// nil is an empty message, so nothing to do
+		return nil
+	}
+
+	// check that we can successfully do the merge
+	src := pmrv.Elem()
+	values := map[*desc.FieldDescriptor]interface{}{}
+	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	if props == nil {
+		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
+	}
+
+	// regular fields
+	for _, prop := range props.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
+			}
+		}
+		rv := src.FieldByName(prop.Name)
+		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
+			continue
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// one-of fields
+	for _, oop := range props.OneofTypes {
+		oov := src.Field(oop.Field).Elem()
+		if !oov.IsValid() || oov.Type() != oop.Type {
+			// this field is unset (in other words, the one-of is not currently set to this choice)
+			continue
+		}
+		prop := oop.Prop
+		rv := oov.Elem().FieldByName(prop.Name)
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
+			}
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// extension fields
+	rexts, _ := proto.ExtensionDescs(pm)
+	var unknownExtensions []byte
+	for _, ed := range rexts {
+		v, _ := proto.GetExtension(pm, ed)
+		if v == nil {
+			continue
+		}
+		if ed.ExtensionType == nil {
+			extBytes, _ := v.([]byte)
+			if len(extBytes) > 0 {
+				unknownExtensions = append(unknownExtensions, extBytes...)
+			}
+			continue
+		}
+		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
+		if fd == nil {
+			var err error
+			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
+				return err
+			}
+		}
+		if v, err := validFieldValue(fd, v); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		mergeField(m, fd, v)
+	}
+
+	u := src.FieldByName("XXX_unrecognized")
+	if u.IsValid() && u.Type() == typeOfBytes {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(u.Interface().([]byte))
+	}
+
+	// lastly, also extract any unknown extensions the message may have (unknown extensions
+	// are stored with other extensions, not in the XXX_unrecognized field, so we have to do
+	// more than just the step above...)
+	if len(unknownExtensions) > 0 {
+		// pulling in unknown fields is best-effort, so we just ignore errors
+		_ = m.UnmarshalMerge(unknownExtensions)
+	}
+	return nil
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missingFields := m.findMissingFields()
+	if len(missingFields) == 0 {
+		return nil
+	}
+	return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+}
+
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missingFields []string
+	for _, fd := range m.md.GetFields() {
+		if fd.IsRequired() {
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				missingFields = append(missingFields, fd.GetName())
+			}
+		}
+	}
+	return missingFields
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields who are also messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	return m.validateRecursive("")
+}
+
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					return nil
+				}
+			}
+			if err := dm.validateRecursive(chprefix); err != nil {
+				return err
+			}
+			return nil
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
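+
+// exampleCheckRequired is a minimal usage sketch: it verifies required fields
+// before a message is used. For proto3 message types this always succeeds,
+// since proto3 has no required fields.
+func exampleCheckRequired(m *Message) error {
+	// Validate checks only this message's own fields; ValidateRecursive also
+	// descends into message-typed field values.
+	return m.ValidateRecursive()
+}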
+
+func getName(fd *desc.FieldDescriptor) string {
+	if fd.IsExtension() {
+		return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+	} else {
+		return fd.GetName()
+	}
+}
+
+// knownFieldTags returns tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+
+	keys := make([]int, len(m.values))
+	i := 0
+	for k := range m.values {
+		keys[i] = int(k)
+		i++
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// allKnownFieldTags returns tags of all recognized fields, whether or not they
+// are present, in sorted order. Extensions, however, are only included if they
+// are present; known but absent extensions are omitted from the returned set
+// of tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+	for k := range m.values {
+		keys = append(keys, int(k))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, ok := m.values[fd.GetNumber()]; !ok {
+			keys = append(keys, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if !fd.IsExtension() { // skip extensions that are not present
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				keys = append(keys, int(fd.GetNumber()))
+			}
+		}
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// unknownFieldTags returns tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+	if len(m.unknownFields) == 0 {
+		return []int(nil)
+	}
+	keys := make([]int, len(m.unknownFields))
+	i := 0
+	for k := range m.unknownFields {
+		keys[i] = int(k)
+		i++
+	}
+	sort.Ints(keys)
+	return keys
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..5fbcc24
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,152 @@
+package dynamic
+
+import (
+	"bytes"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
+// have the same message type and same fields set to equal values. For proto3 messages, fields set
+// to their zero value are considered unset.
+func Equal(a, b *Message) bool {
+	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
+		return false
+	}
+	if len(a.values) != len(b.values) {
+		return false
+	}
+	if len(a.unknownFields) != len(b.unknownFields) {
+		return false
+	}
+	for tag, aval := range a.values {
+		bval, ok := b.values[tag]
+		if !ok {
+			return false
+		}
+		if !fieldsEqual(aval, bval) {
+			return false
+		}
+	}
+	for tag, au := range a.unknownFields {
+		bu, ok := b.unknownFields[tag]
+		if !ok {
+			return false
+		}
+		if len(au) != len(bu) {
+			return false
+		}
+		for i, aval := range au {
+			bval := bu[i]
+			if aval.Encoding != bval.Encoding {
+				return false
+			}
+			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
+				if !bytes.Equal(aval.Contents, bval.Contents) {
+					return false
+				}
+			} else if aval.Value != bval.Value {
+				return false
+			}
+		}
+	}
+	// all checks pass!
+	return true
+}
+
+func fieldsEqual(aval, bval interface{}) bool {
+	arv := reflect.ValueOf(aval)
+	brv := reflect.ValueOf(bval)
+	if arv.Type() != brv.Type() {
+		// it is possible that one is a dynamic message and one is not
+		apm, ok := aval.(proto.Message)
+		if !ok {
+			return false
+		}
+		bpm, ok := bval.(proto.Message)
+		if !ok {
+			return false
+		}
+		return MessagesEqual(apm, bpm)
+
+	} else {
+		switch arv.Kind() {
+		case reflect.Ptr:
+			apm, ok := aval.(proto.Message)
+			if !ok {
+				// Don't know how to compare pointer values that aren't messages!
+				// Maybe this should panic?
+				return false
+			}
+			bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
+			return MessagesEqual(apm, bpm)
+
+		case reflect.Map:
+			return mapsEqual(arv, brv)
+
+		case reflect.Slice:
+			if arv.Type() == typeOfBytes {
+				return bytes.Equal(aval.([]byte), bval.([]byte))
+			} else {
+				return slicesEqual(arv, brv)
+			}
+
+		default:
+			return aval == bval
+		}
+	}
+}
+
+func slicesEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	for i := 0; i < a.Len(); i++ {
+		ai := a.Index(i)
+		bi := b.Index(i)
+		if !fieldsEqual(ai.Interface(), bi.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
+// when one or both of the messages might be a dynamic message.
+func MessagesEqual(a, b proto.Message) bool {
+	da, aok := a.(*Message)
+	db, bok := b.(*Message)
+	// Both dynamic messages
+	if aok && bok {
+		return Equal(da, db)
+	}
+	// Neither dynamic messages
+	if !aok && !bok {
+		return proto.Equal(a, b)
+	}
+	// Mixed
+	if aok {
+		md, err := desc.LoadMessageDescriptorForMessage(b)
+		if err != nil {
+			return false
+		}
+		db = NewMessageWithMessageFactory(md, da.mf)
+		if db.ConvertFrom(b) != nil {
+			return false
+		}
+		return Equal(da, db)
+	} else {
+		md, err := desc.LoadMessageDescriptorForMessage(a)
+		if err != nil {
+			return false
+		}
+		da = NewMessageWithMessageFactory(md, db.mf)
+		if da.ConvertFrom(a) != nil {
+			return false
+		}
+		return Equal(da, db)
+	}
+}
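+
+// A minimal usage sketch: comparing a dynamic message against a generated
+// message, where pb is assumed to be some generated proto.Message and md its
+// descriptor loaded via desc.LoadMessageDescriptorForMessage:
+//
+//    dm := NewMessage(md)
+//    _ = dm.ConvertFrom(pb)
+//    same := MessagesEqual(dm, pb) // true when both carry equal field values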
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..1d38161
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,46 @@
+package dynamic
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked-in version of the extension). So in that case,
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+	if !extd.IsExtension() {
+		return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+	}
+
+	if dm, ok := msg.(*Message); ok {
+		return dm.TrySetField(extd, val)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return err
+	}
+	if err := checkField(extd, md); err != nil {
+		return err
+	}
+
+	val, err = validFieldValue(extd, val)
+	if err != nil {
+		return err
+	}
+
+	var b codec.Buffer
+	b.SetDeterministic(defaultDeterminism)
+	if err := b.EncodeFieldValue(extd, val); err != nil {
+		return err
+	}
+	proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes())
+	return nil
+}
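+
+// A minimal usage sketch, assuming extd is the *desc.FieldDescriptor of an
+// int32 extension (e.g. loaded via desc.LoadFieldDescriptorForExtension) and
+// msg is an instance of the message type that the extension extends:
+//
+//    if err := SetExtension(msg, extd, int32(42)); err != nil {
+//        // the value has the wrong type or extd does not extend msg's type
+//    }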
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ExtensionRegistry is a registry of known extension fields. This is used to parse
+// extension fields encountered when de-serializing a dynamic message.
+type ExtensionRegistry struct {
+	includeDefault bool
+	mu             sync.RWMutex
+	exts           map[string]map[int32]*desc.FieldDescriptor
+}
+
+// NewExtensionRegistryWithDefaults returns a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+	return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+	flds := make([]*desc.FieldDescriptor, len(exts))
+	for i, ext := range exts {
+		fd, err := desc.LoadFieldDescriptorForExtension(ext)
+		if err != nil {
+			return err
+		}
+		flds[i] = fd
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, fd := range flds {
+		r.putExtensionLocked(fd)
+	}
+	return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+	for _, ext := range exts {
+		if !ext.IsExtension() {
+			return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+		}
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range exts {
+		r.putExtensionLocked(ext)
+	}
+	return nil
+}
+
+// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
+func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.addExtensionsFromFileLocked(fd, false, nil)
+}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the given file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	already := map[*desc.FileDescriptor]struct{}{}
+	r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+	if _, ok := alreadySeen[fd]; ok {
+		return
+	}
+
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range fd.GetExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range fd.GetMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+
+	if recursive {
+		alreadySeen[fd] = struct{}{}
+		for _, dep := range fd.GetDependencies() {
+			r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+		}
+	}
+}
+
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+	for _, ext := range md.GetNestedExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range md.GetNestedMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+	msgName := fd.GetOwner().GetFullyQualifiedName()
+	m := r.exts[msgName]
+	if m == nil {
+		m = map[int32]*desc.FieldDescriptor{}
+		r.exts[msgName] = m
+	}
+	m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	fd := r.exts[messageName][tagNumber]
+	if fd == nil && r.includeDefault {
+		ext := getDefaultExtensions(messageName)[tagNumber]
+		if ext != nil {
+			fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+		}
+	}
+	return fd
+}
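+
+// A minimal usage sketch (the extendee name and tag are illustrative):
+// registering an extension descriptor and looking it up again by extendee
+// name and tag number:
+//
+//    er := NewExtensionRegistryWithDefaults()
+//    _ = er.AddExtension(extd) // extd extends some.package.Foo with tag 100
+//    fd := er.FindExtension("some.package.Foo", 100) // returns extd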
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedJSONName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedJSONName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+	t := proto.MessageType(messageName)
+	if t != nil {
+		msg := reflect.Zero(t).Interface().(proto.Message)
+		return proto.RegisteredExtensions(msg)
+	}
+	return nil
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+	if r == nil {
+		return []*desc.FieldDescriptor(nil)
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	flds := r.exts[messageName]
+	var ret []*desc.FieldDescriptor
+	if r.includeDefault {
+		exts := getDefaultExtensions(messageName)
+		if len(exts) > 0 || len(flds) > 0 {
+			ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+		}
+		for tag, ext := range exts {
+			if _, ok := flds[tag]; ok {
+				// skip default extension and use the one explicitly registered instead
+				continue
+			}
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd != nil {
+				ret = append(ret, fd)
+			}
+		}
+	} else if len(flds) > 0 {
+		ret = make([]*desc.FieldDescriptor, 0, len(flds))
+	}
+
+	for _, ext := range flds {
+		ret = append(ret, ext)
+	}
+	return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
+type indentBuffer struct {
+	bytes.Buffer
+	indent      string
+	indentCount int
+	comma       bool
+}
+
+func (b *indentBuffer) start() error {
+	if b.indentCount >= 0 {
+		b.indentCount++
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) sep() error {
+	if b.indentCount >= 0 {
+		_, err := b.WriteString(": ")
+		return err
+	} else {
+		return b.WriteByte(':')
+	}
+}
+
+func (b *indentBuffer) end() error {
+	if b.indentCount >= 0 {
+		b.indentCount--
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) maybeNext(first *bool) error {
+	if *first {
+		*first = false
+		return nil
+	} else {
+		return b.next()
+	}
+}
+
+func (b *indentBuffer) next() error {
+	if b.indentCount >= 0 {
+		return b.newLine(b.comma)
+	} else if b.comma {
+		return b.WriteByte(',')
+	} else {
+		return b.WriteByte(' ')
+	}
+}
+
+func (b *indentBuffer) newLine(comma bool) error {
+	if comma {
+		err := b.WriteByte(',')
+		if err != nil {
+			return err
+		}
+	}
+
+	err := b.WriteByte('\n')
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < b.indentCount; i++ {
+		_, err := b.WriteString(b.indent)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..7dfae09
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1238 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	// link in the well-known-types that have a special JSON format
+	_ "github.com/golang/protobuf/ptypes/any"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "github.com/golang/protobuf/ptypes/empty"
+	_ "github.com/golang/protobuf/ptypes/struct"
+	_ "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/golang/protobuf/ptypes/wrappers"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+var wellKnownTypeNames = map[string]struct{}{
+	"google.protobuf.Any":       {},
+	"google.protobuf.Empty":     {},
+	"google.protobuf.Duration":  {},
+	"google.protobuf.Timestamp": {},
+	// struct.proto
+	"google.protobuf.Struct":    {},
+	"google.protobuf.Value":     {},
+	"google.protobuf.ListValue": {},
+	// wrappers.proto
+	"google.protobuf.DoubleValue": {},
+	"google.protobuf.FloatValue":  {},
+	"google.protobuf.Int64Value":  {},
+	"google.protobuf.UInt64Value": {},
+	"google.protobuf.Int32Value":  {},
+	"google.protobuf.UInt32Value": {},
+	"google.protobuf.BoolValue":   {},
+	"google.protobuf.StringValue": {},
+	"google.protobuf.BytesValue":  {},
+}
+
+// MarshalJSON serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a compact form: no newlines, and spaces between fields and
+// between field identifiers and values are elided.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSON() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{})
+}
+
+// MarshalJSONIndent serializes this message to bytes in JSON format, returning
+// an error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values. Indentation of two spaces is
+// used.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSONIndent() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal it.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+	var b indentBuffer
+	b.indent = opts.Indent
+	if len(opts.Indent) == 0 {
+		b.indentCount = -1
+	}
+	b.comma = true
+	if err := m.marshalJSON(&b, opts); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
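+
+// A minimal usage sketch of the JSON marshaling entry points, where m is a
+// *Message (OrigName and EmitDefaults are standard jsonpb.Marshaler options):
+//
+//    compact, _ := m.MarshalJSON()
+//    pretty, _ := m.MarshalJSONIndent()
+//    custom, err := m.MarshalJSONPB(&jsonpb.Marshaler{OrigName: true, EmitDefaults: true})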
+
+func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := marshalWellKnownType(m, b, opts); ok {
+		return err
+	}
+
+	err := b.WriteByte('{')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	var tags []int
+	if opts.EmitDefaults {
+		tags = m.allKnownFieldTags()
+	} else {
+		tags = m.knownFieldTags()
+	}
+
+	first := true
+
+	for _, tag := range tags {
+		itag := int32(tag)
+		fd := m.FindFieldDescriptor(itag)
+
+		v, ok := m.values[itag]
+		if !ok {
+			if fd.GetOneOf() != nil {
+				// don't print defaults for fields in a oneof
+				continue
+			}
+			v = fd.GetDefaultValue()
+		}
+
+		err := b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		err = marshalKnownFieldJSON(b, fd, v, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	err = b.WriteByte('}')
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// wtf?
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// convert dynamic message to well-known type and let jsonpb marshal it
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := m.MergeInto(msg); err != nil {
+		return true, err
+	}
+	return true, opts.Marshal(b, msg)
+}
+
+func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	var jsonName string
+	if opts.OrigName {
+		jsonName = fd.GetName()
+	} else {
+		jsonName = fd.AsFieldDescriptorProto().GetJsonName()
+		if jsonName == "" {
+			jsonName = fd.GetName()
+		}
+	}
+	if fd.IsExtension() {
+		var scope string
+		switch parent := fd.GetParent().(type) {
+		case *desc.FileDescriptor:
+			scope = parent.GetPackage()
+		default:
+			scope = parent.GetFullyQualifiedName()
+		}
+		if scope == "" {
+			jsonName = fmt.Sprintf("[%s]", jsonName)
+		} else {
+			jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
+		}
+	}
+	err := writeJsonString(b, jsonName)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	if isNil(v) {
+		_, err := b.WriteString("null")
+		return err
+	}
+
+	if fd.IsMap() {
+		err = b.WriteByte('{')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		md := fd.GetMessageType()
+		vfd := md.FindFieldByNumber(2)
+
+		mp := v.(map[interface{}]interface{})
+		keys := make([]interface{}, 0, len(mp))
+		for k := range mp {
+			keys = append(keys, k)
+		}
+		sort.Sort(sortable(keys))
+		first := true
+		for _, mk := range keys {
+			mv := mp[mk]
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+
+			err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte('}')
+
+	} else if fd.IsRepeated() {
+		err = b.WriteByte('[')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		sl := v.([]interface{})
+		first := true
+		for _, slv := range sl {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldValueJSON(b, fd, slv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte(']')
+
+	} else {
+		return marshalKnownFieldValueJSON(b, fd, v, opts)
+	}
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+	return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+	vi := s[i]
+	vj := s[j]
+	switch reflect.TypeOf(vi).Kind() {
+	case reflect.Int32:
+		return vi.(int32) < vj.(int32)
+	case reflect.Int64:
+		return vi.(int64) < vj.(int64)
+	case reflect.Uint32:
+		return vi.(uint32) < vj.(uint32)
+	case reflect.Uint64:
+		return vi.(uint64) < vj.(uint64)
+	case reflect.String:
+		return vi.(string) < vj.(string)
+	case reflect.Bool:
+		return !vi.(bool) && vj.(bool)
+	default:
+		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+	}
+}
+
+func (s sortable) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func isNil(v interface{}) bool {
+	if v == nil {
+		return true
+	}
+	rv := reflect.ValueOf(v)
+	return rv.Kind() == reflect.Ptr && rv.IsNil()
+}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+	rk := reflect.ValueOf(mk)
+	var strkey string
+	switch rk.Kind() {
+	case reflect.Bool:
+		strkey = strconv.FormatBool(rk.Bool())
+	case reflect.Int32, reflect.Int64:
+		strkey = strconv.FormatInt(rk.Int(), 10)
+	case reflect.Uint32, reflect.Uint64:
+		strkey = strconv.FormatUint(rk.Uint(), 10)
+	case reflect.String:
+		strkey = rk.String()
+	default:
+		return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+	}
+	err := writeString(b, strkey)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+	return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
+func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int64:
+		return writeJsonString(b, strconv.FormatInt(rv.Int(), 10))
+	case reflect.Int32:
+		ed := fd.GetEnumType()
+		if !opts.EnumsAsInts && ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				return writeJsonString(b, vd.GetName())
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint64:
+		return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Uint32:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = `"NaN"`
+		} else if math.IsInf(f, 1) {
+			str = `"Infinity"`
+		} else if math.IsInf(f, -1) {
+			str = `"-Infinity"`
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
+		return writeJsonString(b, bstr)
+	case reflect.String:
+		return writeJsonString(b, rv.String())
+	default:
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			return dm.marshalJSON(b, opts)
+		} else {
+			var err error
+			if b.indentCount <= 0 || len(b.indent) == 0 {
+				err = opts.Marshal(b, v.(proto.Message))
+			} else {
+				str, err := opts.MarshalToString(v.(proto.Message))
+				if err != nil {
+					return err
+				}
+				indent := strings.Repeat(b.indent, b.indentCount)
+				pos := 0
+				// add indention prefix to each line
+				for pos < len(str) {
+					start := pos
+					nextPos := strings.Index(str[pos:], "\n")
+					if nextPos == -1 {
+						nextPos = len(str)
+					} else {
+						nextPos = pos + nextPos + 1 // include newline
+					}
+					line := str[start:nextPos]
+					if pos > 0 {
+						_, err = b.WriteString(indent)
+						if err != nil {
+							return err
+						}
+					}
+					_, err = b.WriteString(line)
+					if err != nil {
+						return err
+					}
+					pos = nextPos
+				}
+			}
+			return err
+		}
+	}
+}
+
+func writeJsonString(b *indentBuffer, s string) error {
+	if sbytes, err := json.Marshal(s); err != nil {
+		return err
+	} else {
+		_, err := b.Write(sbytes)
+		return err
+	}
+}
+
+// UnmarshalJSON de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
+// value) unmarshaler:
+//
+//    m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+//
+// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
+// will be used when parsing google.protobuf.Any messages.
+func (m *Message) UnmarshalJSON(js []byte) error {
+	return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
+// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSON(js []byte) error {
+	return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. The given unmarshaler conveys options used
+// when parsing the JSON. This function first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// The decoding is lenient:
+//  1. The JSON can refer to fields either by their JSON name or by their
+//     declared name.
+//  2. The JSON can use either numeric values or string names for enum values.
+//
+// When instantiating nested messages, if this message's associated factory
+// returns a generated message type (as opposed to a dynamic message), the given
+// unmarshaler is used to unmarshal it.
+//
+// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
+// the given unmarshaler is augmented with knowledge of message types known to
+// this message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
+		return err
+	}
+	return m.Validate()
+}
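+
+// A minimal usage sketch: parsing JSON while tolerating unknown field names,
+// assuming js holds a JSON object for this message type (AllowUnknownFields
+// is a standard jsonpb.Unmarshaler option):
+//
+//    u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+//    if err := m.UnmarshalJSONPB(u, js); err != nil {
+//        // js did not match this message type, or a required field is missing
+//    }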
+
+// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
+// format, in the given bytes into this message. The given unmarshaler conveys
+// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	r := newJsReader(js)
+	err := m.unmarshalJson(r, opts)
+	if err != nil {
+		return err
+	}
+	if t, err := r.poll(); err != io.EOF {
+		b, _ := ioutil.ReadAll(r.unread())
+		s := fmt.Sprintf("%v%s", t, string(b))
+		return fmt.Errorf("superfluous data found after JSON object: %q", s)
+	}
+	return nil
+}
+
+func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// wtf?
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// extract json value from r
+	var js json.RawMessage
+	if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
+		return true, err
+	}
+	if err := r.skip(); err != nil {
+		return true, err
+	}
+
+	// unmarshal into well-known type and then convert to dynamic message
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
+		return true, err
+	}
+	return true, m.MergeFrom(msg)
+}
+
+func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := unmarshalWellKnownType(m, r, opts); ok {
+		return err
+	}
+
+	t, err := r.peek()
+	if err != nil {
+		return err
+	}
+	if t == nil {
+		// if json is simply "null" we do nothing
+		r.poll()
+		return nil
+	}
+
+	if err := r.beginObject(); err != nil {
+		return err
+	}
+
+	for r.hasNext() {
+		f, err := r.nextObjectKey()
+		if err != nil {
+			return err
+		}
+		fd := m.FindFieldDescriptorByJSONName(f)
+		if fd == nil {
+			if opts.AllowUnknownFields {
+				r.skip()
+				continue
+			}
+			return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
+		}
+		v, err := unmarshalJsField(fd, r, m.mf, opts)
+		if err != nil {
+			return err
+		}
+		if v != nil {
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		} else if fd.GetOneOf() != nil {
+			// preserve explicit null for oneof fields (this is a little odd but
+			// mimics the behavior of jsonpb with oneofs in generated message types)
+			if fd.GetMessageType() != nil {
+				typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
+				if typ != nil {
+					// typed nil
+					if typ.Kind() != reflect.Ptr {
+						typ = reflect.PtrTo(typ)
+					}
+					v = reflect.Zero(typ).Interface()
+				} else {
+					// can't use nil dynamic message, so we just use empty one instead
+					v = m.mf.NewDynamicMessage(fd.GetMessageType())
+				}
+				if err := m.setField(fd, v); err != nil {
+					return err
+				}
+			} else {
+				// not a message... explicit null makes no sense
+				return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
+			}
+		} else {
+			m.clearField(fd)
+		}
+	}
+
+	if err := r.endObject(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue"
+}
+
+func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && !isWellKnownValue(fd) {
+		// if value is null, just return nil
+		// (unless field is google.protobuf.Value, in which case
+		// we fall through to parse it as an instance where its
+		// underlying value is set to a NullValue)
+		r.poll()
+		return nil, nil
+	}
+
+	if t == json.Delim('{') && fd.IsMap() {
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valueType := entryType.FindFieldByNumber(2)
+		mp := map[interface{}]interface{}{}
+
+		// TODO: if there are just two map keys "key" and "value" and they have the right type of values,
+		// treat this JSON object as a single map entry message. (In keeping with support of map fields as
+		// if they were normal repeated field of entry messages as well as supporting a transition from
+		// optional to repeated...)
+
+		if err := r.beginObject(); err != nil {
+			return nil, err
+		}
+		for r.hasNext() {
+			kk, err := unmarshalJsFieldElement(keyType, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := unmarshalJsFieldElement(valueType, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+		}
+		if err := r.endObject(); err != nil {
+			return nil, err
+		}
+
+		return mp, nil
+	} else if t == json.Delim('[') && !isWellKnownListValue(fd) {
+		// We support parsing an array, even if field is not repeated, to mimic support in proto
+		// binary wire format that supports changing an optional field to repeated and vice versa.
+		// If the field is not repeated, we only keep the last value in the array.
+
+		if err := r.beginArray(); err != nil {
+			return nil, err
+		}
+		var sl []interface{}
+		var v interface{}
+		for r.hasNext() {
+			var err error
+			v, err = unmarshalJsFieldElement(fd, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() && v != nil {
+				sl = append(sl, v)
+			}
+		}
+		if err := r.endArray(); err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			mp := map[interface{}]interface{}{}
+			for _, m := range sl {
+				msg := m.(*Message)
+				kk, err := msg.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				vv, err := msg.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				mp[kk] = vv
+			}
+			return mp, nil
+		} else if fd.IsRepeated() {
+			return sl, nil
+		} else {
+			return v, nil
+		}
+	} else {
+		// We support parsing a singular value, even if field is repeated, to mimic support in proto
+		// binary wire format that supports changing an optional field to repeated and vice versa.
+		// If the field is repeated, we store value as singleton slice of that one value.
+
+		v, err := unmarshalJsFieldElement(fd, r, mf, opts)
+		if err != nil {
+			return nil, err
+		}
+		if v == nil {
+			return nil, nil
+		}
+		if fd.IsRepeated() {
+			return []interface{}{v}, nil
+		} else {
+			return v, nil
+		}
+	}
+}
+
+func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+		m := mf.NewMessage(fd.GetMessageType())
+		if dm, ok := m.(*Message); ok {
+			if err := dm.unmarshalJson(r, opts); err != nil {
+				return nil, err
+			}
+		} else {
+			var msg json.RawMessage
+			if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
+				return nil, err
+			}
+			if err := r.skip(); err != nil {
+				return nil, err
+			}
+			if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
+				return nil, err
+			}
+		}
+		return m, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if e, err := r.nextNumber(); err != nil {
+			return nil, err
+		} else {
+			// value could be string or number
+			if i, err := e.Int64(); err != nil {
+				// number cannot be parsed, so see if it's an enum value name
+				vd := fd.GetEnumType().FindValueByName(string(e))
+				if vd != nil {
+					return vd.GetNumber(), nil
+				} else {
+					return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
+				}
+			} else if i > math.MaxInt32 || i < math.MinInt32 {
+				return nil, NumericOverflowError
+			} else {
+				return int32(i), err
+			}
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := r.nextInt(); err != nil {
+			return nil, err
+		} else if i > math.MaxInt32 || i < math.MinInt32 {
+			return nil, NumericOverflowError
+		} else {
+			return int32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return r.nextInt()
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := r.nextUint(); err != nil {
+			return nil, err
+		} else if i > math.MaxUint32 {
+			return nil, NumericOverflowError
+		} else {
+			return uint32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return r.nextUint()
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if str, ok := t.(string); ok {
+			if str == "true" {
+				r.poll() // consume token
+				return true, err
+			} else if str == "false" {
+				r.poll() // consume token
+				return false, err
+			}
+		}
+		return r.nextBool()
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := r.nextFloat(); err != nil {
+			return nil, err
+		} else {
+			return float32(f), nil
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return r.nextFloat()
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return r.nextBytes()
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return r.nextString()
+
+	default:
+		return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
+	}
+}
+
+type jsReader struct {
+	reader  *bytes.Reader
+	dec     *json.Decoder
+	current json.Token
+	peeked  bool
+}
+
+func newJsReader(b []byte) *jsReader {
+	reader := bytes.NewReader(b)
+	dec := json.NewDecoder(reader)
+	dec.UseNumber()
+	return &jsReader{reader: reader, dec: dec}
+}
+
+func (r *jsReader) unread() io.Reader {
+	bufs := make([]io.Reader, 3)
+	var peeked []byte
+	if r.peeked {
+		if _, ok := r.current.(json.Delim); ok {
+			peeked = []byte(fmt.Sprintf("%v", r.current))
+		} else {
+			peeked, _ = json.Marshal(r.current)
+		}
+	}
+	readerCopy := *r.reader
+	decCopy := *r.dec
+
+	bufs[0] = bytes.NewReader(peeked)
+	bufs[1] = decCopy.Buffered()
+	bufs[2] = &readerCopy
+	return &concatReader{bufs: bufs}
+}
+
+func (r *jsReader) hasNext() bool {
+	return r.dec.More()
+}
+
+func (r *jsReader) peek() (json.Token, error) {
+	if r.peeked {
+		return r.current, nil
+	}
+	t, err := r.dec.Token()
+	if err != nil {
+		return nil, err
+	}
+	r.peeked = true
+	r.current = t
+	return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+	if r.peeked {
+		ret := r.current
+		r.current = nil
+		r.peeked = false
+		return ret, nil
+	}
+	return r.dec.Token()
+}
+
+func (r *jsReader) beginObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
+	return err
+}
+
+func (r *jsReader) endObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
+	return err
+}
+
+func (r *jsReader) beginArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
+	return err
+}
+
+func (r *jsReader) endArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
+	return err
+}
+
+func (r *jsReader) nextObjectKey() (string, error) {
+	return r.nextString()
+}
+
+func (r *jsReader) nextString() (string, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+	if err != nil {
+		return "", err
+	}
+	return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+	str, err := r.nextString()
+	if err != nil {
+		return nil, err
+	}
+	return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+	if err != nil {
+		return false, err
+	}
+	return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Float64()
+}
+
+func (r *jsReader) nextNumber() (json.Number, error) {
+	t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
+	if err != nil {
+		return "", err
+	}
+	switch t := t.(type) {
+	case json.Number:
+		return t, nil
+	case string:
+		return json.Number(t), nil
+	}
+	return "", fmt.Errorf("expecting a number but got %v", t)
+}
+
+func (r *jsReader) skip() error {
+	t, err := r.poll()
+	if err != nil {
+		return err
+	}
+	if t == json.Delim('[') {
+		if err := r.skipArray(); err != nil {
+			return err
+		}
+	} else if t == json.Delim('{') {
+		if err := r.skipObject(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *jsReader) skipArray() error {
+	for r.hasNext() {
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) skipObject() error {
+	for r.hasNext() {
+		// skip object key
+		if err := r.skip(); err != nil {
+			return err
+		}
+		// and value
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endObject(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+	t, err := r.poll()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && ifNil != nil {
+		return ifNil, nil
+	}
+	if !predicate(t) {
+		return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+	}
+	return t, nil
+}
+
+type concatReader struct {
+	bufs []io.Reader
+	curr int
+}
+
+func (r *concatReader) Read(p []byte) (n int, err error) {
+	for {
+		if r.curr >= len(r.bufs) {
+			err = io.EOF
+			return
+		}
+		var c int
+		c, err = r.bufs[r.curr].Read(p)
+		n += c
+		if err != io.EOF {
+			return
+		}
+		r.curr++
+		p = p[c:]
+	}
+}
+
+// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
+// to resolve message names. It uses the given factory, which may be nil, to
+// instantiate messages. The messages that it returns when resolving a type name
+// may often be dynamic messages.
+func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
+	return &anyResolver{mf: mf, files: files}
+}
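+
+// A minimal usage sketch: wiring this resolver into jsonpb so that
+// google.protobuf.Any fields referring to types defined in fd (a
+// *desc.FileDescriptor) can be marshaled; a nil factory is allowed:
+//
+//    opts := &jsonpb.Marshaler{AnyResolver: AnyResolver(nil, fd)}
+//    js, err := m.MarshalJSONPB(opts)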
+
+type anyResolver struct {
+	mf      *MessageFactory
+	files   []*desc.FileDescriptor
+	ignored map[*desc.FileDescriptor]struct{}
+	other   jsonpb.AnyResolver
+}
+
+func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
+	if r, ok := r.(*anyResolver); ok {
+		if _, ok := r.ignored[f]; ok {
+			// if the current resolver is ignoring this file, it's because another
+			// (upstream) resolver is already handling it, so nothing to do
+			return r, false
+		}
+		for _, file := range r.files {
+			if file == f {
+				// no need to wrap!
+				return r, false
+			}
+		}
+		// ignore files that will be checked by the resolver we're wrapping
+		// (we'll just delegate and let it search those files)
+		ignored := map[*desc.FileDescriptor]struct{}{}
+		for i := range r.ignored {
+			ignored[i] = struct{}{}
+		}
+		ignore(r.files, ignored)
+		return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
+	}
+	return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
+}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+	for _, f := range files {
+		if _, ok := ignored[f]; ok {
+			continue
+		}
+		ignored[f] = struct{}{}
+		ignore(f.GetDependencies(), ignored)
+	}
+}
+
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+
+	// see if the user-specified resolver is able to do the job
+	if r.other != nil {
+		msg, err := r.other.Resolve(typeUrl)
+		if err == nil {
+			return msg, nil
+		}
+	}
+
+	// try to find the message in our known set of files
+	checked := map[*desc.FileDescriptor]struct{}{}
+	for _, f := range r.files {
+		md := r.findMessage(f, mname, checked)
+		if md != nil {
+			return r.mf.NewMessage(md), nil
+		}
+	}
+	// failing that, see if the message factory knows about this type
+	var ktr *KnownTypeRegistry
+	if r.mf != nil {
+		ktr = r.mf.ktr
+	} else {
+		ktr = (*KnownTypeRegistry)(nil)
+	}
+	m := ktr.CreateIfKnown(mname)
+	if m != nil {
+		return m, nil
+	}
+
+	// no other resolver to fallback to? mimic default behavior
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	// if this is an ignored descriptor, skip
+	if _, ok := r.ignored[fd]; ok {
+		return nil
+	}
+
+	// bail if we've already checked this file
+	if _, ok := checked[fd]; ok {
+		return nil
+	}
+	checked[fd] = struct{}{}
+
+	// see if this file has the message
+	md := fd.FindMessage(msgName)
+	if md != nil {
+		return md
+	}
+
+	// if not, recursively search the file's imports
+	for _, dep := range fd.GetDependencies() {
+		md = r.findMessage(dep, msgName, checked)
+		if md != nil {
+			return md
+		}
+	}
+	return nil
+}
+
+var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..bb68d7b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,129 @@
+//+build !go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty because MapKeys()
+		// function allocates heavily.
+		return true
+	}
+
+	for _, k := range a.MapKeys() {
+		av := a.MapIndex(k)
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	for _, k := range val.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := val.MapIndex(k)
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	for _, k := range src.MapKeys() {
+		if !canConvert(k, kt) {
+			return false
+		}
+		if !canConvert(src.MapIndex(k), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	for _, k := range src.MapKeys() {
+		v := src.MapIndex(k)
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	for _, k := range rv.MapKeys() {
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		v := rv.MapIndex(k)
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..f5ffd67
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,137 @@
+//+build go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty
+		return true
+	}
+
+	iter := a.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		av := iter.Value()
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	iter := val.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := iter.Value()
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		if !canConvert(iter.Key(), kt) {
+			return false
+		}
+		if !canConvert(iter.Value(), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	iter := rv.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message.
+// Use this instead of proto.Merge when one or both of the messages might be a
+// dynamic message. If there is a problem merging the messages, such as the
+// two messages having different types, then this method will panic (just as
+// proto.Merge does).
+func Merge(dst, src proto.Message) {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			panic(err.Error())
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			panic(err.Error())
+		}
+	} else {
+		proto.Merge(dst, src)
+	}
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panicking.
+func TryMerge(dst, src proto.Message) error {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			return err
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			return err
+		}
+	} else {
+		// proto.Merge panics on bad input, so we first verify
+		// inputs and return error instead of panic
+		out := reflect.ValueOf(dst)
+		if out.IsNil() {
+			return errors.New("proto: nil destination")
+		}
+		in := reflect.ValueOf(src)
+		if in.Type() != out.Type() {
+			return errors.New("proto: type mismatch")
+		}
+		proto.Merge(dst, src)
+	}
+	return nil
+}
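+
+// Example (illustrative sketch, not part of the upstream source): merging two
+// dynamic messages that share a descriptor. Assumes md is a
+// *desc.MessageDescriptor for a message with a string field named "name",
+// obtained elsewhere (for instance via desc.LoadMessageDescriptor).
+//
+//   dst := dynamic.NewMessage(md)
+//   src := dynamic.NewMessage(md)
+//   src.SetFieldByName("name", "example")
+//   if err := dynamic.TryMerge(dst, src); err != nil {
+//       // handle the error instead of panicking, e.g. a type mismatch
+//   }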
+
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+	rv := reflect.ValueOf(val)
+
+	if fd.IsMap() && rv.Kind() == reflect.Map {
+		return mergeMapField(m, fd, rv)
+	}
+
+	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+		for i := 0; i < rv.Len(); i++ {
+			e := rv.Index(i)
+			if e.Kind() == reflect.Interface && !e.IsNil() {
+				e = e.Elem()
+			}
+			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if fd.IsRepeated() {
+		return m.addRepeatedField(fd, val)
+	} else if fd.GetMessageType() == nil {
+		return m.setField(fd, val)
+	}
+
+	// it's a message type, so we want to merge contents
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+
+	existing, _ := m.doGetField(fd, true)
+	if existing != nil && !reflect.ValueOf(existing).IsNil() {
+		return TryMerge(existing.(proto.Message), val.(proto.Message))
+	}
+
+	// no existing message, so just set field
+	m.internalSetField(fd, val)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..6c54de8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,189 @@
+package dynamic
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+	er  *ExtensionRegistry
+	ktr *KnownTypeRegistry
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(er, nil)
+}
+
+// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
+// known types, per the given registry, will be returned as normal protobuf messages
+// (e.g. generated structs, instead of dynamic messages).
+func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(nil, ktr)
+}
+
+// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
+// (those for which protoc-generated code is statically linked into the Go program) are
+// known types. If any dynamic messages are produced, they will recognize and parse all
+// "default" extension fields. This is the equivalent of:
+//   NewMessageFactoryWithRegistries(
+//       NewExtensionRegistryWithDefaults(),
+//       NewKnownTypeRegistryWithDefaults())
+func NewMessageFactoryWithDefaults() *MessageFactory {
+	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
+}
+
+// NewMessageFactoryWithRegistries creates a new message factory with the given extension
+// and known type registries.
+func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
+	return &MessageFactory{
+		er:  er,
+		ktr: ktr,
+	}
+}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+	var ktr *KnownTypeRegistry
+	if f != nil {
+		ktr = f.ktr
+	}
+	if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+		return m
+	}
+	return NewMessageWithMessageFactory(md, f)
+}
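+
+// Example (illustrative sketch, not part of the upstream source): a factory
+// that knows about all statically linked generated types. For a known type it
+// returns the generated struct; otherwise it returns a *dynamic.Message.
+// Assumes md is a *desc.MessageDescriptor obtained elsewhere.
+//
+//   mf := dynamic.NewMessageFactoryWithDefaults()
+//   msg := mf.NewMessage(md) // generated struct if known, else *dynamic.Message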
+
+// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
+// descriptor. This is like f.NewMessage(md) except the known type registry is not
+// consulted so the return value is always a dynamic message.
+//
+// This is also like dynamic.NewMessage(md) except that the returned message will use
+// this factory when creating other messages, like during de-serialization of fields
+// that are themselves message types.
+func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, f)
+}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.er
+}
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
+// KnownTypeRegistry is a registry of known message types, as identified by their
+// fully-qualified name. A known message type is one for which a protoc-generated
+// struct exists, so a dynamic message is not necessary to represent it. A
+// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
+// struct or a dynamic message. The zero-value registry (including the behavior of
+// a nil pointer) only knows about the "well-known types" in protobuf. These
+// include only the wrapper types and a handful of other special types like Any,
+// Duration, and Timestamp.
+type KnownTypeRegistry struct {
+	excludeWkt     bool
+	includeDefault bool
+	mu             sync.RWMutex
+	types          map[string]reflect.Type
+}
+
+// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
+// "default" types (those for which protoc-generated code is statically linked
+// into the Go program).
+func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
+	return &KnownTypeRegistry{includeDefault: true}
+}
+
+// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
+// include the "well-known types" in protobuf. So even well-known types would be
+// represented by a dynamic message.
+func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
+	return &KnownTypeRegistry{excludeWkt: true}
+}
+
+// AddKnownType adds the types of the given messages as known types.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.types == nil {
+		r.types = map[string]reflect.Type{}
+	}
+	for _, kt := range kts {
+		r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+	}
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+	msgType := r.GetKnownType(messageName)
+	if msgType == nil {
+		return nil
+	}
+
+	if msgType.Kind() == reflect.Ptr {
+		return reflect.New(msgType.Elem()).Interface().(proto.Message)
+	} else {
+		return reflect.New(msgType).Elem().Interface().(proto.Message)
+	}
+}
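+
+// Example (illustrative sketch, not part of the upstream source): registering
+// a generated type so that factories using this registry return it instead of
+// a dynamic message. The mypb package and its MyMessage type are hypothetical
+// placeholders for any protoc-generated Go type.
+//
+//   ktr := dynamic.NewKnownTypeRegistryWithDefaults()
+//   ktr.AddKnownType(&mypb.MyMessage{})
+//   pm := ktr.CreateIfKnown("my.package.MyMessage") // a new *mypb.MyMessage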
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+	var msgType reflect.Type
+	if r == nil {
+		// a nil registry behaves the same as a zero-value instance: it only knows about well-known types
+		t := proto.MessageType(messageName)
+		if t != nil && t.Implements(typeOfWkt) {
+			msgType = t
+		}
+	} else {
+		if r.includeDefault {
+			msgType = proto.MessageType(messageName)
+		} else if !r.excludeWkt {
+			t := proto.MessageType(messageName)
+			if t != nil && t.Implements(typeOfWkt) {
+				msgType = t
+			}
+		}
+		if msgType == nil {
+			r.mu.RLock()
+			msgType = r.types[messageName]
+			r.mu.RUnlock()
+		}
+	}
+
+	return msgType
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..72636f2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1175 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/codec"
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+	var b indentBuffer
+	b.indentCount = -1 // no indentation
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+	var b indentBuffer
+	b.indent = "  " // TODO: option for indent?
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
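+
+// Example (illustrative sketch, not part of the upstream source): rendering a
+// dynamic message in the text format. Assumes dm is a *dynamic.Message that
+// has already been populated.
+//
+//   compact, err := dm.MarshalText()      // single line, minimal spacing
+//   if err != nil { /* handle error */ }
+//   pretty, err := dm.MarshalTextIndent() // one field per line, indented
+//   if err != nil { /* handle error */ }
+//   fmt.Println(string(compact), string(pretty))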
+
+func (m *Message) marshalText(b *indentBuffer) error {
+	// TODO: option for emitting extended Any format?
+	first := true
+	// first the known fields
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		v := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd.IsMap() {
+			md := fd.GetMessageType()
+			kfd := md.FindFieldByNumber(1)
+			vfd := md.FindFieldByNumber(2)
+			mp := v.(map[interface{}]interface{})
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, mk := range keys {
+				mv := mp[mk]
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
+				if err != nil {
+					return err
+				}
+			}
+		} else if fd.IsRepeated() {
+			sl := v.([]interface{})
+			for _, slv := range sl {
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldText(b, fd, slv)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldText(b, fd, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	// then the unknown fields
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		ufs := m.unknownFields[itag]
+		for _, uf := range ufs {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			_, err = fmt.Fprintf(b, "%d", tag)
+			if err != nil {
+				return err
+			}
+			if uf.Encoding == proto.WireStartGroup {
+				err = b.WriteByte('{')
+				if err != nil {
+					return err
+				}
+				err = b.start()
+				if err != nil {
+					return err
+				}
+				in := codec.NewBuffer(uf.Contents)
+				err = marshalUnknownGroupText(b, in, true)
+				if err != nil {
+					return err
+				}
+				err = b.end()
+				if err != nil {
+					return err
+				}
+				err = b.WriteByte('}')
+				if err != nil {
+					return err
+				}
+			} else {
+				err = b.sep()
+				if err != nil {
+					return err
+				}
+				if uf.Encoding == proto.WireBytes {
+					err = writeString(b, string(uf.Contents))
+					if err != nil {
+						return err
+					}
+				} else {
+					_, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
+	var name string
+	if fd.IsExtension() {
+		name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+	} else {
+		name = fd.GetName()
+	}
+	_, err := b.WriteString(name)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	err = b.WriteByte('<')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	err = marshalKnownFieldText(b, kfd, mk)
+	if err != nil {
+		return err
+	}
+	err = b.next()
+	if err != nil {
+		return err
+	}
+	err = marshalKnownFieldText(b, vfd, mv)
+	if err != nil {
+		return err
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	return b.WriteByte('>')
+}
+
+func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
+	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	if group {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
+		} else {
+			name = fd.GetMessageType().GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+	} else {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+		} else {
+			name = fd.GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+		err = b.sep()
+		if err != nil {
+			return err
+		}
+	}
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int32, reflect.Int64:
+		ed := fd.GetEnumType()
+		if ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				_, err := b.WriteString(vd.GetName())
+				return err
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint32, reflect.Uint64:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = "nan"
+		} else if math.IsInf(f, 1) {
+			str = "inf"
+		} else if math.IsInf(f, -1) {
+			str = "-inf"
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		return writeString(b, string(rv.Bytes()))
+	case reflect.String:
+		return writeString(b, rv.String())
+	default:
+		var err error
+		if group {
+			err = b.WriteByte('{')
+		} else {
+			err = b.WriteByte('<')
+		}
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			err = dm.marshalText(b)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = proto.CompactText(b, v.(proto.Message))
+			if err != nil {
+				return err
+			}
+		}
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		if group {
+			return b.WriteByte('}')
+		} else {
+			return b.WriteByte('>')
+		}
+	}
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(b *indentBuffer, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := b.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = b.WriteString("\\n")
+		case '\r':
+			_, err = b.WriteString("\\r")
+		case '\t':
+			_, err = b.WriteString("\\t")
+		case '"':
+			_, err = b.WriteString("\\\"")
+		case '\\':
+			_, err = b.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c < 0x7f {
+				err = b.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(b, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return b.WriteByte('"')
+}
+
+func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error {
+	first := true
+	for {
+		if in.EOF() {
+			if topLevel {
+				return nil
+			}
+			// this is a nested message: we are expecting an end-group tag, not EOF!
+			return io.ErrUnexpectedEOF
+		}
+		tag, wireType, err := in.DecodeTagAndWireType()
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireEndGroup {
+			return nil
+		}
+		err = b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(b, "%d", tag)
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireStartGroup {
+			err = b.WriteByte('{')
+			if err != nil {
+				return err
+			}
+			err = b.start()
+			if err != nil {
+				return err
+			}
+			err = marshalUnknownGroupText(b, in, false)
+			if err != nil {
+				return err
+			}
+			err = b.end()
+			if err != nil {
+				return err
+			}
+			err = b.WriteByte('}')
+			if err != nil {
+				return err
+			}
+			continue
+		} else {
+			err = b.sep()
+			if err != nil {
+				return err
+			}
+			if wireType == proto.WireBytes {
+				contents, err := in.DecodeRawBytes(false)
+				if err != nil {
+					return err
+				}
+				err = writeString(b, string(contents))
+				if err != nil {
+					return err
+				}
+			} else {
+				var v uint64
+				switch wireType {
+				case proto.WireVarint:
+					v, err = in.DecodeVarint()
+				case proto.WireFixed32:
+					v, err = in.DecodeFixed32()
+				case proto.WireFixed64:
+					v, err = in.DecodeFixed64()
+				default:
+					return proto.ErrInternalBadWireType
+				}
+				if err != nil {
+					return err
+				}
+				_, err = b.WriteString(strconv.FormatUint(v, 10))
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format.
+func (m *Message) UnmarshalText(text []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeText(text); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+	return m.unmarshalText(newReader(text), tokenEOF)
+}
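+
+// Example (illustrative sketch, not part of the upstream source): parsing the
+// text format into a dynamic message. Assumes md describes a message with a
+// string field "name" and an int32 field "id".
+//
+//   dm := dynamic.NewMessage(md)
+//   if err := dm.UnmarshalText([]byte(`name: "example" id: 42`)); err != nil {
+//       // handle parse or validation error
+//   }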
+
+func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
+	for {
+		tok := tr.next()
+		if tok.tokTyp == end {
+			return nil
+		}
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		var fd *desc.FieldDescriptor
+		var extendedAnyType *desc.MessageDescriptor
+		if tok.tokTyp == tokenInt {
+			// tag number (indicates unknown field)
+			tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
+			if err != nil {
+				return err
+			}
+			itag := int32(tag)
+			fd = m.FindFieldDescriptor(itag)
+			if fd == nil {
+				// can't parse the value w/out field descriptor, so skip it
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				} else if tok.tokTyp == tokenOpenBrace {
+					if err := skipMessageText(tr, true); err != nil {
+						return err
+					}
+				} else if tok.tokTyp == tokenColon {
+					if err := skipFieldValueText(tr); err != nil {
+						return err
+					}
+				} else {
+					return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+				}
+				tok = tr.peek()
+				if tok.tokTyp.IsSep() {
+					tr.next() // consume separator
+				}
+				continue
+			}
+		} else {
+			fieldName, err := unmarshalFieldNameText(tr, tok)
+			if err != nil {
+				return err
+			}
+			fd = m.FindFieldDescriptorByName(fieldName)
+			if fd == nil {
+				// See if it's a group name
+				for _, field := range m.md.GetFields() {
+					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+						fd = field
+						break
+					}
+				}
+				if fd == nil {
+					// maybe this is an extended Any
+					if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
+						// strip surrounding "[" and "]" and extract type name from URL
+						typeUrl := fieldName[1 : len(fieldName)-1]
+						mname := typeUrl
+						if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+							mname = mname[slash+1:]
+						}
+						// TODO: add a way to weave an AnyResolver to this point
+						extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
+						if extendedAnyType == nil {
+							return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
+						}
+						// field 1 is "type_url"
+						typeUrlField := m.md.FindFieldByNumber(1)
+						if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
+							return err
+						}
+					} else {
+						// TODO: add a flag to just ignore unrecognized field names
+						return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
+					}
+				}
+			}
+		}
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		if extendedAnyType != nil {
+			// consume optional colon; make sure this is a "start message" token
+			if tok.tokTyp == tokenColon {
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				}
+			}
+			if tok.tokTyp.EndToken() == tokenError {
+				return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
+			}
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(extendedAnyType)
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			// now we marshal the message to bytes and store in the Any
+			b, err := g.Marshal()
+			if err != nil {
+				return err
+			}
+			// field 2 is "value"
+			anyValueField := m.md.FindFieldByNumber(2)
+			if err := m.TrySetField(anyValueField, b); err != nil {
+				return err
+			}
+
+		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+			tok.tokTyp.EndToken() != tokenError {
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			if fd.IsRepeated() {
+				if err := m.TryAddRepeatedField(fd, g); err != nil {
+					return err
+				}
+			} else {
+				if err := m.TrySetField(fd, g); err != nil {
+					return err
+				}
+			}
+		} else {
+			if tok.tokTyp != tokenColon {
+				return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
+			}
+			if err := m.unmarshalFieldValueText(fd, tr); err != nil {
+				return err
+			}
+		}
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+	md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+	if md == nil {
+		// couldn't find it; see if we have this message linked in
+		md, _ = desc.LoadMessageDescriptor(name)
+	}
+	return md
+}
+
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	if _, ok := seen[fd]; ok {
+		// already checked this file
+		return nil
+	}
+	seen[fd] = struct{}{}
+	md := fd.FindMessage(name)
+	if md != nil {
+		return md
+	}
+	// not in this file so recursively search its deps
+	for _, dep := range fd.GetDependencies() {
+		md = findMessageInTransitiveDeps(name, dep, seen)
+		if md != nil {
+			return md
+		}
+	}
+	// couldn't find it
+	return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+	var msg string
+	if tok.tokTyp == tokenError {
+		msg = tok.val.(error).Error()
+	} else {
+		msg = fmt.Sprintf(format, args...)
+	}
+	return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
+type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
+func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
+	var set setFunction
+	if fd.IsRepeated() {
+		set = (*Message).addRepeatedField
+	} else {
+		set = mergeField
+	}
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+		}
+	}
+	return m.unmarshalFieldElementText(fd, tr, set)
+}
+
+func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	}
+
+	var expected string
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if tok.tokTyp == tokenIdent {
+			if tok.val.(string) == "true" {
+				return set(m, fd, true)
+			} else if tok.val.(string) == "false" {
+				return set(m, fd, false)
+			}
+		}
+		expected = "boolean value"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, []byte(tok.val.(string)))
+		}
+		expected = "bytes string value"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, tok.val)
+		}
+		expected = "string value"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, float32(tok.val.(float64)))
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, float32(f))
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, float32(math.Inf(1)))
+			} else if ident == "nan" {
+				return set(m, fd, float32(math.NaN()))
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, float32(math.Inf(-1)))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, tok.val)
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, f)
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, math.Inf(1))
+			} else if ident == "nan" {
+				return set(m, fd, math.NaN())
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, math.Inf(-1))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, uint32(i))
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if tok.tokTyp == tokenIdent {
+			// TODO: add a flag to just ignore unrecognized enum value names?
+			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
+			if vd != nil {
+				return set(m, fd, vd.GetNumber())
+			}
+		} else if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+		endTok := tok.tokTyp.EndToken()
+		if endTok != tokenError {
+			dm := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := dm.unmarshalText(tr, endTok); err != nil {
+				return err
+			}
+			// TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
+			// proto package to unmarshal it. But the text parser isn't particularly amenable
+			// to that, so we instead convert a dynamic message to a generated one if the
+			// known-type registry knows about the generated type...
+			var ktr *KnownTypeRegistry
+			if m.mf != nil {
+				ktr = m.mf.ktr
+			}
+			pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
+			if pm != nil {
+				// if the conversion to the generated type succeeds, prefer it
+				if err := dm.ConvertTo(pm); err == nil {
+					return set(m, fd, pm)
+				}
+			}
+			return set(m, fd, dm)
+		}
+		expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
+	default:
+		return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
+	}
+
+	// if we get here, token was wrong type; create error message
+	var article string
+	if strings.Contains("aieou", expected[0:1]) {
+		article = "an"
+	} else {
+		article = "a"
+	}
+	return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
+}
+
+func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
+	if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
+		// extension name
+		var closeType tokenType
+		var closeChar string
+		if tok.tokTyp == tokenOpenBracket {
+			closeType = tokenCloseBracket
+			closeChar = "close bracket ']'"
+		} else {
+			closeType = tokenCloseParen
+			closeChar = "close paren ')'"
+		}
+		// must be followed by an identifier
+		idents := make([]string, 0, 1)
+		for {
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp != tokenIdent {
+				return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
+			}
+			idents = append(idents, tok.val.(string))
+			// and then close bracket/paren, or "/" to keep adding URL elements to name
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp == closeType {
+				break
+			} else if tok.tokTyp != tokenSlash {
+				return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
+			}
+		}
+		return "[" + strings.Join(idents, "/") + "]", nil
+	} else if tok.tokTyp == tokenIdent {
+		// normal field name
+		return tok.val.(string), nil
+	} else {
+		return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
+	}
+}
+
+func skipFieldNameText(tr *txtReader) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	} else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+		return nil
+	} else {
+		_, err := unmarshalFieldNameText(tr, tok)
+		return err
+	}
+}
+
+func skipFieldValueText(tr *txtReader) error {
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := skipFieldElementText(tr); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+
+		}
+	}
+	return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+	tok := tr.next()
+	switch tok.tokTyp {
+	case tokenEOF:
+		return io.ErrUnexpectedEOF
+	case tokenInt, tokenFloat, tokenString, tokenIdent:
+		return nil
+	case tokenOpenAngle:
+		return skipMessageText(tr, false)
+	default:
+		return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+	}
+}
+
+func skipMessageText(tr *txtReader, isGroup bool) error {
+	for {
+		tok := tr.peek()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if isGroup && tok.tokTyp == tokenCloseBrace {
+			return nil
+		} else if !isGroup && tok.tokTyp == tokenCloseAngle {
+			return nil
+		}
+
+		// field name or tag
+		if err := skipFieldNameText(tr); err != nil {
+			return err
+		}
+
+		// field value
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if tok.tokTyp == tokenOpenBrace {
+			if err := skipMessageText(tr, true); err != nil {
+				return err
+			}
+		} else if tok.tokTyp == tokenColon {
+			if err := skipFieldValueText(tr); err != nil {
+				return err
+			}
+		} else {
+			return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+		}
+
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+
+type tokenType int
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenIdent
+	tokenString
+	tokenInt
+	tokenFloat
+	tokenColon
+	tokenComma
+	tokenSemiColon
+	tokenOpenBrace
+	tokenCloseBrace
+	tokenOpenBracket
+	tokenCloseBracket
+	tokenOpenAngle
+	tokenCloseAngle
+	tokenOpenParen
+	tokenCloseParen
+	tokenSlash
+	tokenMinus
+)
+
+func (t tokenType) IsSep() bool {
+	return t == tokenComma || t == tokenSemiColon
+}
+
+func (t tokenType) EndToken() tokenType {
+	switch t {
+	case tokenOpenAngle:
+		return tokenCloseAngle
+	case tokenOpenBrace:
+		return tokenCloseBrace
+	default:
+		return tokenError
+	}
+}
+
+type token struct {
+	tokTyp tokenType
+	val    interface{}
+	txt    string
+	pos    scanner.Position
+}
+
+type txtReader struct {
+	scanner    scanner.Scanner
+	peeked     token
+	havePeeked bool
+}
+
+func newReader(text []byte) *txtReader {
+	sc := scanner.Scanner{}
+	sc.Init(bytes.NewReader(text))
+	sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
+		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+	// identifiers follow the same restrictions as Go identifiers, except we also allow
+	// dots since we accept fully-qualified names
+	sc.IsIdentRune = func(ch rune, i int) bool {
+		return ch == '_' || unicode.IsLetter(ch) ||
+			(i > 0 && unicode.IsDigit(ch)) ||
+			(i > 0 && ch == '.')
+	}
+	// ignore errors; we handle them if/when we see malformed tokens
+	sc.Error = func(s *scanner.Scanner, msg string) {}
+	return &txtReader{scanner: sc}
+}
+
+func (p *txtReader) peek() *token {
+	if p.havePeeked {
+		return &p.peeked
+	}
+	t := p.scanner.Scan()
+	if t == scanner.EOF {
+		p.peeked.tokTyp = tokenEOF
+		p.peeked.val = nil
+		p.peeked.txt = ""
+		p.peeked.pos = p.scanner.Position
+	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
+		p.peeked.tokTyp = tokenError
+		p.peeked.val = err
+	}
+	p.havePeeked = true
+	return &p.peeked
+}
+
+func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
+	p.peeked.pos = pos
+	p.peeked.txt = text
+	switch t {
+	case scanner.Ident:
+		p.peeked.tokTyp = tokenIdent
+		p.peeked.val = text
+	case scanner.Int:
+		p.peeked.tokTyp = tokenInt
+		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+	case scanner.Float:
+		p.peeked.tokTyp = tokenFloat
+		var err error
+		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+			return err
+		}
+	case scanner.Char, scanner.String:
+		p.peeked.tokTyp = tokenString
+		var err error
+		if p.peeked.val, err = strconv.Unquote(text); err != nil {
+			return err
+		}
+	case '-': // unary minus, for negative ints and floats
+		ch := p.scanner.Peek()
+		if ch < '0' || ch > '9' {
+			p.peeked.tokTyp = tokenMinus
+			p.peeked.val = '-'
+		} else {
+			t := p.scanner.Scan()
+			if t == scanner.EOF {
+				return io.ErrUnexpectedEOF
+			} else if t == scanner.Float {
+				p.peeked.tokTyp = tokenFloat
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				var err error
+				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+					p.peeked.pos = p.scanner.Position
+					return err
+				}
+			} else if t == scanner.Int {
+				p.peeked.tokTyp = tokenInt
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+			} else {
+				p.peeked.pos = p.scanner.Position
+				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
+			}
+		}
+	case ':':
+		p.peeked.tokTyp = tokenColon
+		p.peeked.val = ':'
+	case ',':
+		p.peeked.tokTyp = tokenComma
+		p.peeked.val = ','
+	case ';':
+		p.peeked.tokTyp = tokenSemiColon
+		p.peeked.val = ';'
+	case '{':
+		p.peeked.tokTyp = tokenOpenBrace
+		p.peeked.val = '{'
+	case '}':
+		p.peeked.tokTyp = tokenCloseBrace
+		p.peeked.val = '}'
+	case '<':
+		p.peeked.tokTyp = tokenOpenAngle
+		p.peeked.val = '<'
+	case '>':
+		p.peeked.tokTyp = tokenCloseAngle
+		p.peeked.val = '>'
+	case '[':
+		p.peeked.tokTyp = tokenOpenBracket
+		p.peeked.val = '['
+	case ']':
+		p.peeked.tokTyp = tokenCloseBracket
+		p.peeked.val = ']'
+	case '(':
+		p.peeked.tokTyp = tokenOpenParen
+		p.peeked.val = '('
+	case ')':
+		p.peeked.tokTyp = tokenCloseParen
+		p.peeked.val = ')'
+	case '/':
+		// only allowed to separate URL components in expanded Any format
+		p.peeked.tokTyp = tokenSlash
+		p.peeked.val = '/'
+	default:
+		return fmt.Errorf("invalid character: %c", t)
+	}
+	return nil
+}
+
+func (p *txtReader) next() *token {
+	t := p.peek()
+	if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+		p.havePeeked = false
+	}
+	return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..4a8b47a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases are the standard protos included with protoc, but older versions of
+// their respective packages registered them using incorrect paths.
+var StdFileAliases = map[string]string{
+	// Files for the github.com/golang/protobuf/ptypes package at one point were
+	// registered using the path where the proto files are mirrored in GOPATH,
+	// inside the golang/protobuf repo.
+	// (Fixed as of https://github.com/golang/protobuf/pull/412)
+	"google/protobuf/any.proto":       "github.com/golang/protobuf/ptypes/any/any.proto",
+	"google/protobuf/duration.proto":  "github.com/golang/protobuf/ptypes/duration/duration.proto",
+	"google/protobuf/empty.proto":     "github.com/golang/protobuf/ptypes/empty/empty.proto",
+	"google/protobuf/struct.proto":    "github.com/golang/protobuf/ptypes/struct/struct.proto",
+	"google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+	"google/protobuf/wrappers.proto":  "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+	// Files for the google.golang.org/genproto/protobuf package at one point
+	// were registered with an anomalous "src/" prefix.
+	// (Fixed as of https://github.com/google/go-genproto/pull/31)
+	"google/protobuf/api.proto":            "src/google/protobuf/api.proto",
+	"google/protobuf/field_mask.proto":     "src/google/protobuf/field_mask.proto",
+	"google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+	"google/protobuf/type.proto":           "src/google/protobuf/type.proto",
+
+	// Other standard files (descriptor.proto and compiler/plugin.proto) are
+	// registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+	// We provide aliasing in both directions, to support files with the
+	// proper import path linked against older versions of the generated
+	// files AND files that used the aliased import path but linked against
+	// newer versions of the generated files (which register with the
+	// correct path).
+
+	// Get all files defined above
+	keys := make([]string, 0, len(StdFileAliases))
+	for k := range StdFileAliases {
+		keys = append(keys, k)
+	}
+	// And add inverse mappings
+	for _, k := range keys {
+		alias := StdFileAliases[k]
+		StdFileAliases[alias] = k
+	}
+}
+
+type ErrNoSuchFile string
+
+func (e ErrNoSuchFile) Error() string {
+	return fmt.Sprintf("no such file: %q", string(e))
+}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+	fdb := proto.FileDescriptor(file)
+	aliased := false
+	if fdb == nil {
+		var ok bool
+		alias, ok := StdFileAliases[file]
+		if ok {
+			aliased = true
+			if fdb = proto.FileDescriptor(alias); fdb == nil {
+				return nil, ErrNoSuchFile(file)
+			}
+		} else {
+			return nil, ErrNoSuchFile(file)
+		}
+	}
+
+	fd, err := DecodeFileDescriptor(file, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	if aliased {
+		// the file descriptor will have the alias used to load it, but
+		// we need it to have the specified name in order to link it
+		fd.Name = proto.String(file)
+	}
+
+	return fd, nil
+}
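+
+// Example (illustrative sketch, not part of the upstream source): loading a
+// standard descriptor from within the protoreflect sub-packages, falling back
+// to the registered alias if the canonical path is not found.
+//
+//   fd, err := internal.LoadFileDescriptor("google/protobuf/duration.proto")
+//   if err != nil {
+//       // no descriptor registered under the canonical or aliased path
+//   }
+//   _ = fd // *dpb.FileDescriptorProto with Name set to the requested path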
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+	raw, err := decompress(fdb)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+	}
+	fd := dpb.FileDescriptorProto{}
+	if err := proto.Unmarshal(raw, &fd); err != nil {
+		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+	}
+	return &fd, nil
+}
+
+func decompress(b []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	out, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000..14127cd
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000..949b77e
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,40 @@
+# Windows Terminal Sequences
+
+This library allows for enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+	
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 0000000..716c613
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 0000000..ef18d8f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	// query the current mode of the handle we were given
+	err := syscall.GetConsoleMode(stream, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/looplab/fsm/.gitignore b/vendor/github.com/looplab/fsm/.gitignore
new file mode 100644
index 0000000..969bdaa
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/.gitignore
@@ -0,0 +1,28 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.sublime-project
+*.sublime-workspace
+
+.DS_Store
+.wercker
diff --git a/vendor/github.com/looplab/fsm/.travis.yml b/vendor/github.com/looplab/fsm/.travis.yml
new file mode 100644
index 0000000..b9b0eda
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+
+go:
+  - "1.11"
+
+services:
+  - docker
+
+cache:
+  directories:
+    - ${GOPATH}/pkg/mod
+
+jobs:
+  include:
+    - stage: test
+      if: type == pull_request
+      script: make test
+      env:
+        - GO111MODULE=on
+
+    - stage: test_and_cover
+      name: "Test (with coverage)"
+      if: type != pull_request
+      script: make cover publish_cover
+      env:
+        - GO111MODULE=on
diff --git a/vendor/github.com/looplab/fsm/LICENSE b/vendor/github.com/looplab/fsm/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/looplab/fsm/Makefile b/vendor/github.com/looplab/fsm/Makefile
new file mode 100644
index 0000000..6cad29f
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/Makefile
@@ -0,0 +1,22 @@
+default: services test
+
+.PHONY: test
+test:
+	go test ./...
+
+.PHONY: cover
+cover:
+	go list -f '{{if len .TestGoFiles}}"go test -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}"{{end}}' ./... | xargs -L 1 sh -c
+
+.PHONY: publish_cover
+publish_cover: cover
+	go get -d golang.org/x/tools/cmd/cover
+	go get github.com/modocache/gover
+	go get github.com/mattn/goveralls
+	gover
+	@goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken=$(COVERALLS_TOKEN)
+
+.PHONY: clean
+clean:
+	@find . -name \.coverprofile -type f -delete
+	@rm -f gover.coverprofile
diff --git a/vendor/github.com/looplab/fsm/README.md b/vendor/github.com/looplab/fsm/README.md
new file mode 100644
index 0000000..f07e5eb
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/README.md
@@ -0,0 +1,117 @@
+[![Build Status](https://travis-ci.com/looplab/fsm.svg?branch=master)](https://travis-ci.com/looplab/fsm)
+[![Coverage Status](https://img.shields.io/coveralls/looplab/fsm.svg)](https://coveralls.io/r/looplab/fsm)
+[![GoDoc](https://godoc.org/github.com/looplab/fsm?status.svg)](https://godoc.org/github.com/looplab/fsm)
+[![Go Report Card](https://goreportcard.com/badge/looplab/fsm)](https://goreportcard.com/report/looplab/fsm)
+
+# FSM for Go
+
+FSM is a finite state machine for Go.
+
+It is heavily based on two FSM implementations:
+
+- Javascript Finite State Machine, https://github.com/jakesgordon/javascript-state-machine
+
+- Fysom for Python, https://github.com/oxplot/fysom (forked at https://github.com/mriehl/fysom)
+
+For API docs and examples see http://godoc.org/github.com/looplab/fsm
+
+# Basic Example
+
+From examples/simple.go:
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/looplab/fsm"
+)
+
+func main() {
+    fsm := fsm.NewFSM(
+        "closed",
+        fsm.Events{
+            {Name: "open", Src: []string{"closed"}, Dst: "open"},
+            {Name: "close", Src: []string{"open"}, Dst: "closed"},
+        },
+        fsm.Callbacks{},
+    )
+
+    fmt.Println(fsm.Current())
+
+    err := fsm.Event("open")
+    if err != nil {
+        fmt.Println(err)
+    }
+
+    fmt.Println(fsm.Current())
+
+    err = fsm.Event("close")
+    if err != nil {
+        fmt.Println(err)
+    }
+
+    fmt.Println(fsm.Current())
+}
+```
+
+# Usage as a struct field
+
+From examples/struct.go:
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/looplab/fsm"
+)
+
+type Door struct {
+    To  string
+    FSM *fsm.FSM
+}
+
+func NewDoor(to string) *Door {
+    d := &Door{
+        To: to,
+    }
+
+    d.FSM = fsm.NewFSM(
+        "closed",
+        fsm.Events{
+            {Name: "open", Src: []string{"closed"}, Dst: "open"},
+            {Name: "close", Src: []string{"open"}, Dst: "closed"},
+        },
+        fsm.Callbacks{
+            "enter_state": func(e *fsm.Event) { d.enterState(e) },
+        },
+    )
+
+    return d
+}
+
+func (d *Door) enterState(e *fsm.Event) {
+    fmt.Printf("The door to %s is %s\n", d.To, e.Dst)
+}
+
+func main() {
+    door := NewDoor("heaven")
+
+    err := door.FSM.Event("open")
+    if err != nil {
+        fmt.Println(err)
+    }
+
+    err = door.FSM.Event("close")
+    if err != nil {
+        fmt.Println(err)
+    }
+}
+```
+
+# License
+
+FSM is licensed under Apache License 2.0
+
+http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/looplab/fsm/errors.go b/vendor/github.com/looplab/fsm/errors.go
new file mode 100644
index 0000000..9c32a49
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/errors.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2013 - Max Persson <max@looplab.se>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsm
+
+// InvalidEventError is returned by FSM.Event() when the event cannot be called
+// in the current state.
+type InvalidEventError struct {
+	Event string
+	State string
+}
+
+func (e InvalidEventError) Error() string {
+	return "event " + e.Event + " inappropriate in current state " + e.State
+}
+
+// UnknownEventError is returned by FSM.Event() when the event is not defined.
+type UnknownEventError struct {
+	Event string
+}
+
+func (e UnknownEventError) Error() string {
+	return "event " + e.Event + " does not exist"
+}
+
+// InTransitionError is returned by FSM.Event() when an asynchronous transition
+// is already in progress.
+type InTransitionError struct {
+	Event string
+}
+
+func (e InTransitionError) Error() string {
+	return "event " + e.Event + " inappropriate because previous transition did not complete"
+}
+
+// NotInTransitionError is returned by FSM.Transition() when an asynchronous
+// transition is not in progress.
+type NotInTransitionError struct{}
+
+func (e NotInTransitionError) Error() string {
+	return "transition inappropriate because no state change in progress"
+}
+
+// NoTransitionError is returned by FSM.Event() when no transition has happened,
+// for example if the source and destination states are the same.
+type NoTransitionError struct {
+	Err error
+}
+
+func (e NoTransitionError) Error() string {
+	if e.Err != nil {
+		return "no transition with error: " + e.Err.Error()
+	}
+	return "no transition"
+}
+
+// CanceledError is returned by FSM.Event() when a callback has canceled a
+// transition.
+type CanceledError struct {
+	Err error
+}
+
+func (e CanceledError) Error() string {
+	if e.Err != nil {
+		return "transition canceled with error: " + e.Err.Error()
+	}
+	return "transition canceled"
+}
+
+// AsyncError is returned by FSM.Event() when a callback has initiated an
+// asynchronous state transition.
+type AsyncError struct {
+	Err error
+}
+
+func (e AsyncError) Error() string {
+	if e.Err != nil {
+		return "async started with error: " + e.Err.Error()
+	}
+	return "async started"
+}
+
+// InternalError is returned by FSM.Event() and should never occur. It is
+// most likely caused by a bug.
+type InternalError struct{}
+
+func (e InternalError) Error() string {
+	return "internal error on state transition"
+}
diff --git a/vendor/github.com/looplab/fsm/event.go b/vendor/github.com/looplab/fsm/event.go
new file mode 100644
index 0000000..fdca2d0
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/event.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2013 - Max Persson <max@looplab.se>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsm
+
+// Event is the info that gets passed as a reference in the callbacks.
+type Event struct {
+	// FSM is a reference to the current FSM.
+	FSM *FSM
+
+	// Event is the event name.
+	Event string
+
+	// Src is the state before the transition.
+	Src string
+
+	// Dst is the state after the transition.
+	Dst string
+
+	// Err is an optional error that can be returned from a callback.
+	Err error
+
+	// Args is an optional list of arguments passed to the callback.
+	Args []interface{}
+
+	// canceled is an internal flag set if the transition is canceled.
+	canceled bool
+
+	// async is an internal flag set if the transition should be asynchronous
+	async bool
+}
+
+// Cancel can be called in before_<EVENT> or leave_<STATE> to cancel the
+// current transition before it happens. It takes an optional error, which
+// will overwrite e.Err if it was set before.
+func (e *Event) Cancel(err ...error) {
+	e.canceled = true
+
+	if len(err) > 0 {
+		e.Err = err[0]
+	}
+}
+
+// Async can be called in leave_<STATE> to do an asynchronous state transition.
+//
+// The current state transition will be on hold in the old state until a final
+// call to Transition is made. This will complete the transition and possibly
+// call the other callbacks.
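+//
+// A minimal usage sketch (illustrative only; "paused", "run" and "resume" are
+// assumed state/event names, not part of this package):
+//
+//	f := fsm.NewFSM("paused", fsm.Events{
+//		{Name: "resume", Src: []string{"paused"}, Dst: "run"},
+//	}, fsm.Callbacks{
+//		"leave_paused": func(e *fsm.Event) { e.Async() },
+//	})
+//	if err := f.Event("resume"); err != nil {
+//		// err is an AsyncError: the FSM is still in "paused"
+//	}
+//	_ = f.Transition() // completes the transition to "run"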
+func (e *Event) Async() {
+	e.async = true
+}
diff --git a/vendor/github.com/looplab/fsm/fsm.go b/vendor/github.com/looplab/fsm/fsm.go
new file mode 100644
index 0000000..9da9db3
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/fsm.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2013 - Max Persson <max@looplab.se>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fsm implements a finite state machine.
+//
+// It is heavily based on two FSM implementations:
+//
+// Javascript Finite State Machine
+// https://github.com/jakesgordon/javascript-state-machine
+//
+// Fysom for Python
+// https://github.com/oxplot/fysom (forked at https://github.com/mriehl/fysom)
+//
+package fsm
+
+import (
+	"strings"
+	"sync"
+)
+
+// transitioner is an interface for the FSM's transition function.
+type transitioner interface {
+	transition(*FSM) error
+}
+
+// FSM is the state machine that holds the current state.
+//
+// It has to be created with NewFSM to function properly.
+type FSM struct {
+	// current is the state that the FSM is currently in.
+	current string
+
+	// transitions maps events and source states to destination states.
+	transitions map[eKey]string
+
+	// callbacks maps events and targets to callback functions.
+	callbacks map[cKey]Callback
+
+	// transition is the internal transition functions used either directly
+	// or when Transition is called in an asynchronous state transition.
+	transition func()
+	// transitionerObj calls the FSM's transition() function.
+	transitionerObj transitioner
+
+	// stateMu guards access to the current state.
+	stateMu sync.RWMutex
+	// eventMu guards access to Event() and Transition().
+	eventMu sync.Mutex
+}
+
+// EventDesc represents an event when initializing the FSM.
+//
+// The event can have one or more source states that are valid for performing
+// the transition. If the FSM is in one of the source states it will end up in
+// the specified destination state, calling all defined callbacks as it goes.
+type EventDesc struct {
+	// Name is the event name used when calling for a transition.
+	Name string
+
+	// Src is a slice of source states that the FSM must be in to perform a
+	// state transition.
+	Src []string
+
+	// Dst is the destination state that the FSM will be in if the transition
+	// succeeds.
+	Dst string
+}
+
+// Callback is the function type that callbacks should use. The Event argument
+// holds the current event info at the time the callback is called.
+type Callback func(*Event)
+
+// Events is a shorthand for defining the transition map in NewFSM.
+type Events []EventDesc
+
+// Callbacks is a shorthand for defining the callbacks in NewFSM.
+type Callbacks map[string]Callback
+
+// NewFSM constructs a FSM from events and callbacks.
+//
+// The events and transitions are specified as a slice of Event structs
+// specified as Events. Each Event is mapped to one or more internal
+// transitions from Event.Src to Event.Dst.
+//
+// Callbacks are added as a map specified as Callbacks where the key is parsed
+// as the callback event as follows, and called in the same order:
+//
+// 1. before_<EVENT> - called before event named <EVENT>
+//
+// 2. before_event - called before all events
+//
+// 3. leave_<OLD_STATE> - called before leaving <OLD_STATE>
+//
+// 4. leave_state - called before leaving all states
+//
+// 5. enter_<NEW_STATE> - called after entering <NEW_STATE>
+//
+// 6. enter_state - called after entering all states
+//
+// 7. after_<EVENT> - called after event named <EVENT>
+//
+// 8. after_event - called after all events
+//
+// There are also two short form versions for the most commonly used callbacks.
+// They are simply the name of the event or state:
+//
+// 1. <NEW_STATE> - called after entering <NEW_STATE>
+//
+// 2. <EVENT> - called after event named <EVENT>
+//
+// If both a shorthand version and a full version are specified it is undefined
+// which version of the callback will end up in the internal map. This is due
+// to the pseudo-random nature of Go maps. No checking for multiple keys is
+// currently performed.
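+//
+// A minimal sketch of a callback map (illustrative; "warn", "yellow" and the
+// callback bodies are assumptions, not defined by this package):
+//
+//	fsm.Callbacks{
+//		"before_warn": func(e *fsm.Event) { /* before the "warn" event */ },
+//		"leave_state": func(e *fsm.Event) { /* before leaving any state */ },
+//		"yellow":      func(e *fsm.Event) { /* shorthand for enter_yellow */ },
+//	}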
+func NewFSM(initial string, events []EventDesc, callbacks map[string]Callback) *FSM {
+	f := &FSM{
+		transitionerObj: &transitionerStruct{},
+		current:         initial,
+		transitions:     make(map[eKey]string),
+		callbacks:       make(map[cKey]Callback),
+	}
+
+	// Build transition map and store sets of all events and states.
+	allEvents := make(map[string]bool)
+	allStates := make(map[string]bool)
+	for _, e := range events {
+		for _, src := range e.Src {
+			f.transitions[eKey{e.Name, src}] = e.Dst
+			allStates[src] = true
+			allStates[e.Dst] = true
+		}
+		allEvents[e.Name] = true
+	}
+
+	// Map all callbacks to events/states.
+	for name, fn := range callbacks {
+		var target string
+		var callbackType int
+
+		switch {
+		case strings.HasPrefix(name, "before_"):
+			target = strings.TrimPrefix(name, "before_")
+			if target == "event" {
+				target = ""
+				callbackType = callbackBeforeEvent
+			} else if _, ok := allEvents[target]; ok {
+				callbackType = callbackBeforeEvent
+			}
+		case strings.HasPrefix(name, "leave_"):
+			target = strings.TrimPrefix(name, "leave_")
+			if target == "state" {
+				target = ""
+				callbackType = callbackLeaveState
+			} else if _, ok := allStates[target]; ok {
+				callbackType = callbackLeaveState
+			}
+		case strings.HasPrefix(name, "enter_"):
+			target = strings.TrimPrefix(name, "enter_")
+			if target == "state" {
+				target = ""
+				callbackType = callbackEnterState
+			} else if _, ok := allStates[target]; ok {
+				callbackType = callbackEnterState
+			}
+		case strings.HasPrefix(name, "after_"):
+			target = strings.TrimPrefix(name, "after_")
+			if target == "event" {
+				target = ""
+				callbackType = callbackAfterEvent
+			} else if _, ok := allEvents[target]; ok {
+				callbackType = callbackAfterEvent
+			}
+		default:
+			target = name
+			if _, ok := allStates[target]; ok {
+				callbackType = callbackEnterState
+			} else if _, ok := allEvents[target]; ok {
+				callbackType = callbackAfterEvent
+			}
+		}
+
+		if callbackType != callbackNone {
+			f.callbacks[cKey{target, callbackType}] = fn
+		}
+	}
+
+	return f
+}
+
+// Current returns the current state of the FSM.
+func (f *FSM) Current() string {
+	f.stateMu.RLock()
+	defer f.stateMu.RUnlock()
+	return f.current
+}
+
+// Is returns true if state is the current state.
+func (f *FSM) Is(state string) bool {
+	f.stateMu.RLock()
+	defer f.stateMu.RUnlock()
+	return state == f.current
+}
+
+// SetState allows the user to move to the given state from the current state.
+// The call does not trigger any callbacks, even if they are defined.
+func (f *FSM) SetState(state string) {
+	f.stateMu.Lock()
+	defer f.stateMu.Unlock()
+	f.current = state
+	return
+}
+
+// Can returns true if event can occur in the current state.
+func (f *FSM) Can(event string) bool {
+	f.stateMu.RLock()
+	defer f.stateMu.RUnlock()
+	_, ok := f.transitions[eKey{event, f.current}]
+	return ok && (f.transition == nil)
+}
+
+// AvailableTransitions returns a list of transitions available in the
+// current state.
+func (f *FSM) AvailableTransitions() []string {
+	f.stateMu.RLock()
+	defer f.stateMu.RUnlock()
+	var transitions []string
+	for key := range f.transitions {
+		if key.src == f.current {
+			transitions = append(transitions, key.event)
+		}
+	}
+	return transitions
+}
+
+// Cannot returns true if the event cannot occur in the current state.
+// It is a convenience method to help code read nicely.
+func (f *FSM) Cannot(event string) bool {
+	return !f.Can(event)
+}
+
+// Event initiates a state transition with the named event.
+//
+// The call takes a variable number of arguments that will be passed to the
+// callback, if defined.
+//
+// It will return nil if the state change is ok or one of these errors:
+//
+// - event X inappropriate because previous transition did not complete
+//
+// - event X inappropriate in current state Y
+//
+// - event X does not exist
+//
+// - internal error on state transition
+//
+// The last error should never occur in this situation and is a sign of an
+// internal bug.
+func (f *FSM) Event(event string, args ...interface{}) error {
+	f.eventMu.Lock()
+	defer f.eventMu.Unlock()
+
+	f.stateMu.RLock()
+	defer f.stateMu.RUnlock()
+
+	if f.transition != nil {
+		return InTransitionError{event}
+	}
+
+	dst, ok := f.transitions[eKey{event, f.current}]
+	if !ok {
+		for ekey := range f.transitions {
+			if ekey.event == event {
+				return InvalidEventError{event, f.current}
+			}
+		}
+		return UnknownEventError{event}
+	}
+
+	e := &Event{f, event, f.current, dst, nil, args, false, false}
+
+	err := f.beforeEventCallbacks(e)
+	if err != nil {
+		return err
+	}
+
+	if f.current == dst {
+		f.afterEventCallbacks(e)
+		return NoTransitionError{e.Err}
+	}
+
+	// Set up the transition, to be called later.
+	f.transition = func() {
+		f.stateMu.Lock()
+		f.current = dst
+		f.stateMu.Unlock()
+
+		f.enterStateCallbacks(e)
+		f.afterEventCallbacks(e)
+	}
+
+	if err = f.leaveStateCallbacks(e); err != nil {
+		if _, ok := err.(CanceledError); ok {
+			f.transition = nil
+		}
+		return err
+	}
+
+	// Perform the rest of the transition, if not asynchronous.
+	f.stateMu.RUnlock()
+	err = f.doTransition()
+	f.stateMu.RLock()
+	if err != nil {
+		return InternalError{}
+	}
+
+	return e.Err
+}
+
+// Transition wraps transitioner.transition.
+func (f *FSM) Transition() error {
+	f.eventMu.Lock()
+	defer f.eventMu.Unlock()
+	return f.doTransition()
+}
+
+// doTransition wraps transitioner.transition.
+func (f *FSM) doTransition() error {
+	return f.transitionerObj.transition(f)
+}
+
+// transitionerStruct is the default implementation of the transitioner
+// interface. Other implementations can be swapped in for testing.
+type transitionerStruct struct{}
+
+// Transition completes an asynchronous state change.
+//
+// The callback for leave_<STATE> must previously have called Async on its
+// event to have initiated an asynchronous state transition.
+func (t transitionerStruct) transition(f *FSM) error {
+	if f.transition == nil {
+		return NotInTransitionError{}
+	}
+	f.transition()
+	f.transition = nil
+	return nil
+}
+
+// beforeEventCallbacks calls the before_ callbacks, first the named then the
+// general version.
+func (f *FSM) beforeEventCallbacks(e *Event) error {
+	if fn, ok := f.callbacks[cKey{e.Event, callbackBeforeEvent}]; ok {
+		fn(e)
+		if e.canceled {
+			return CanceledError{e.Err}
+		}
+	}
+	if fn, ok := f.callbacks[cKey{"", callbackBeforeEvent}]; ok {
+		fn(e)
+		if e.canceled {
+			return CanceledError{e.Err}
+		}
+	}
+	return nil
+}
+
+// leaveStateCallbacks calls the leave_ callbacks, first the named then the
+// general version.
+func (f *FSM) leaveStateCallbacks(e *Event) error {
+	if fn, ok := f.callbacks[cKey{f.current, callbackLeaveState}]; ok {
+		fn(e)
+		if e.canceled {
+			return CanceledError{e.Err}
+		} else if e.async {
+			return AsyncError{e.Err}
+		}
+	}
+	if fn, ok := f.callbacks[cKey{"", callbackLeaveState}]; ok {
+		fn(e)
+		if e.canceled {
+			return CanceledError{e.Err}
+		} else if e.async {
+			return AsyncError{e.Err}
+		}
+	}
+	return nil
+}
+
+// enterStateCallbacks calls the enter_ callbacks, first the named then the
+// general version.
+func (f *FSM) enterStateCallbacks(e *Event) {
+	if fn, ok := f.callbacks[cKey{f.current, callbackEnterState}]; ok {
+		fn(e)
+	}
+	if fn, ok := f.callbacks[cKey{"", callbackEnterState}]; ok {
+		fn(e)
+	}
+}
+
+// afterEventCallbacks calls the after_ callbacks, first the named then the
+// general version.
+func (f *FSM) afterEventCallbacks(e *Event) {
+	if fn, ok := f.callbacks[cKey{e.Event, callbackAfterEvent}]; ok {
+		fn(e)
+	}
+	if fn, ok := f.callbacks[cKey{"", callbackAfterEvent}]; ok {
+		fn(e)
+	}
+}
+
+const (
+	callbackNone int = iota
+	callbackBeforeEvent
+	callbackLeaveState
+	callbackEnterState
+	callbackAfterEvent
+)
+
+// cKey is a struct key used for keeping the callbacks mapped to a target.
+type cKey struct {
+	// target is either the name of a state or an event depending on which
+	// callback type the key refers to. It can also be "" for a non-targeted
+	// callback like before_event.
+	target string
+
+	// callbackType is the situation when the callback will be run.
+	callbackType int
+}
+
+// eKey is a struct key used for storing the transition map.
+type eKey struct {
+	// event is the name of the event that the keys refers to.
+	event string
+
+	// src is the source from where the event can transition.
+	src string
+}
diff --git a/vendor/github.com/looplab/fsm/go.mod b/vendor/github.com/looplab/fsm/go.mod
new file mode 100644
index 0000000..4ae0575
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/go.mod
@@ -0,0 +1 @@
+module github.com/looplab/fsm
diff --git a/vendor/github.com/looplab/fsm/utils.go b/vendor/github.com/looplab/fsm/utils.go
new file mode 100644
index 0000000..e49892e
--- /dev/null
+++ b/vendor/github.com/looplab/fsm/utils.go
@@ -0,0 +1,45 @@
+package fsm
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Visualize outputs a visualization of a FSM in Graphviz format.
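+//
+// A minimal sketch (illustrative; the states and event are assumptions): the
+// returned string can be written to a .dot file and rendered with Graphviz.
+//
+//	f := fsm.NewFSM("closed", fsm.Events{
+//		{Name: "open", Src: []string{"closed"}, Dst: "open"},
+//	}, fsm.Callbacks{})
+//	fmt.Print(fsm.Visualize(f))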
+func Visualize(fsm *FSM) string {
+	var buf bytes.Buffer
+
+	states := make(map[string]int)
+
+	buf.WriteString(fmt.Sprintf(`digraph fsm {`))
+	buf.WriteString("\n")
+
+	// make sure the initial state is at top
+	for k, v := range fsm.transitions {
+		if k.src == fsm.current {
+			states[k.src]++
+			states[v]++
+			buf.WriteString(fmt.Sprintf(`    "%s" -> "%s" [ label = "%s" ];`, k.src, v, k.event))
+			buf.WriteString("\n")
+		}
+	}
+
+	for k, v := range fsm.transitions {
+		if k.src != fsm.current {
+			states[k.src]++
+			states[v]++
+			buf.WriteString(fmt.Sprintf(`    "%s" -> "%s" [ label = "%s" ];`, k.src, v, k.event))
+			buf.WriteString("\n")
+		}
+	}
+
+	buf.WriteString("\n")
+
+	for k := range states {
+		buf.WriteString(fmt.Sprintf(`    "%s";`, k))
+		buf.WriteString("\n")
+	}
+	buf.WriteString(fmt.Sprintln("}"))
+
+	return buf.String()
+}
diff --git a/vendor/github.com/opencord/cordctl/LICENSE.md b/vendor/github.com/opencord/cordctl/LICENSE.md
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/opencord/cordctl/LICENSE.md
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opencord/cordctl/pkg/format/formatter.go b/vendor/github.com/opencord/cordctl/pkg/format/formatter.go
new file mode 100644
index 0000000..dc92dec
--- /dev/null
+++ b/vendor/github.com/opencord/cordctl/pkg/format/formatter.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2019-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package format
+
+import (
+	"io"
+	"reflect"
+	"regexp"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+	"text/template/parse"
+)
+
+var nameFinder = regexp.MustCompile(`\.([_A-Za-z0-9]*)}}`)
+
+type Format string
+
+func (f Format) IsTable() bool {
+	return strings.HasPrefix(string(f), "table")
+}
+
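+// Execute renders data (a single value or a slice) through the format
+// template. If the format string has a "table" prefix, output goes through a
+// tabwriter and, when withHeaders is true, a header row is derived from the
+// template's field names. A minimal usage sketch (illustrative; the struct,
+// its fields and the values are assumptions, not part of this package):
+//
+//	type row struct{ Name, Id string }
+//	f := format.Format("table{{ .Name }}\t{{ .Id }}")
+//	_ = f.Execute(os.Stdout, true, []row{{Name: "onu-1", Id: "42"}})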
+func (f Format) Execute(writer io.Writer, withHeaders bool, data interface{}) error {
+	var tabWriter *tabwriter.Writer = nil
+	format := f
+
+	if f.IsTable() {
+		tabWriter = tabwriter.NewWriter(writer, 0, 4, 4, ' ', 0)
+		format = Format(strings.TrimPrefix(string(f), "table"))
+	}
+
+	tmpl, err := template.New("output").Parse(string(format))
+	if err != nil {
+		return err
+	}
+
+	if f.IsTable() && withHeaders {
+		var header string
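+		// Derive the header row from the parsed template: literal text nodes
+		// (tabs, separators) are copied verbatim, and each {{.Field}} action
+		// contributes its field name upper-cased, extracted via nameFinder.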
+		for _, n := range tmpl.Tree.Root.Nodes {
+			switch n.Type() {
+			case parse.NodeText:
+				header += n.String()
+			case parse.NodeString:
+				header += n.String()
+			case parse.NodeAction:
+				found := nameFinder.FindStringSubmatch(n.String())
+				if len(found) == 2 {
+					header += strings.ToUpper(found[1])
+				}
+			}
+		}
+		tabWriter.Write([]byte(header))
+		tabWriter.Write([]byte("\n"))
+
+		slice := reflect.ValueOf(data)
+		if slice.Kind() == reflect.Slice {
+			for i := 0; i < slice.Len(); i++ {
+				tmpl.Execute(tabWriter, slice.Index(i).Interface())
+				tabWriter.Write([]byte("\n"))
+			}
+		} else {
+			tmpl.Execute(tabWriter, data)
+			tabWriter.Write([]byte("\n"))
+		}
+		tabWriter.Flush()
+		return nil
+	}
+
+	slice := reflect.ValueOf(data)
+	if slice.Kind() == reflect.Slice {
+		for i := 0; i < slice.Len(); i++ {
+			tmpl.Execute(writer, slice.Index(i).Interface())
+			writer.Write([]byte("\n"))
+		}
+	} else {
+		tmpl.Execute(writer, data)
+		writer.Write([]byte("\n"))
+	}
+	return nil
+
+}
diff --git a/vendor/github.com/opencord/omci-sim/.gitreview b/vendor/github.com/opencord/omci-sim/.gitreview
new file mode 100644
index 0000000..36dffab
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/.gitreview
@@ -0,0 +1,5 @@
+[gerrit]
+host=gerrit.opencord.org
+port=29418
+project=omci-sim.git
+defaultremote=origin
diff --git a/vendor/github.com/opencord/omci-sim/README.md b/vendor/github.com/opencord/omci-sim/README.md
new file mode 100644
index 0000000..7183f6e
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/README.md
@@ -0,0 +1,3 @@
+# omci-sim
+
+Test
diff --git a/vendor/github.com/opencord/omci-sim/VERSION b/vendor/github.com/opencord/omci-sim/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/vendor/github.com/opencord/omci-sim/go.mod b/vendor/github.com/opencord/omci-sim/go.mod
new file mode 100644
index 0000000..516906d
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/go.mod
@@ -0,0 +1,5 @@
+module github.com/opencord/omci-sim
+
+go 1.12
+
+require github.com/sirupsen/logrus v1.4.2
diff --git a/vendor/github.com/opencord/omci-sim/go.sum b/vendor/github.com/opencord/omci-sim/go.sum
new file mode 100644
index 0000000..46e5ef0
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/go.sum
@@ -0,0 +1,9 @@
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/opencord/omci-sim/omci_anig.go b/vendor/github.com/opencord/omci-sim/omci_anig.go
new file mode 100644
index 0000000..49734a7
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_anig.go
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+type AniGAttributes int
+const (
+	_								= iota
+	SRIndication  				AniGAttributes	= 0x8000
+	TotalTcontNumber 			AniGAttributes	= 0x4000
+	GEMBlockLength				AniGAttributes	= 0x2000
+	PiggybackDBAReporting 		AniGAttributes	= 0x1000
+	WholeONTDBAReporting		AniGAttributes	= 0x0800
+	SFThreshold					AniGAttributes	= 0x0400
+	SDThreshold					AniGAttributes	= 0x0200
+	ARC							AniGAttributes	= 0x0100
+	ARCInterval					AniGAttributes	= 0x0080
+	OpticalSignalLevel			AniGAttributes	= 0x0040
+	LowerOpticalThreshold		AniGAttributes	= 0x0020
+	UpperOpticalThreshold		AniGAttributes	= 0x0010
+	ONTResponseTime				AniGAttributes	= 0x0008
+	TransmitOpticalLeval		AniGAttributes	= 0x0004
+	LowerTransmitPowerThreshold	AniGAttributes	= 0x0002
+	UpperTransmitPowerThreshold	AniGAttributes	= 0x0001
+)
+
+type ANIGAttributeHandler func(*uint, []byte) ([]byte, error)
+
+var ANIGAttributeHandlers = map[AniGAttributes]ANIGAttributeHandler{
+	SRIndication: GetSRIndication,
+	OpticalSignalLevel: GetOpticalSignalLevel,
+	LowerOpticalThreshold: GetLowerOpticalThreshold,
+	UpperOpticalThreshold: GetUpperOpticalThreshold,
+	TotalTcontNumber: GetTotalTcontNumber,
+	GEMBlockLength: GetGEMBlockLength,
+	PiggybackDBAReporting: GetPiggybackDBAReporting,
+	WholeONTDBAReporting: GetWholeONTDBAReporting,
+	SFThreshold: GetSFThreshold,
+	SDThreshold: GetSDThreshold,
+	ARC: GetARC,
+	ARCInterval: GetARCInterval,
+	ONTResponseTime: GetONTResponseTime,
+	TransmitOpticalLeval: GetTransmitOpticalLeval,
+	LowerTransmitPowerThreshold: GetLowerTransmitPowerThreshold,
+	UpperTransmitPowerThreshold: GetUpperTransmitPowerThreshold,
+}
+
+
+func GetANIGAttributes(pos *uint, pkt []byte, content OmciContent) ([]byte, error) {
+	AttributesMask := getAttributeMask(content)
+
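+	// Walk the 16-bit attribute mask from the most significant bit down; for
+	// every bit that is set, the matching handler writes that attribute's
+	// value into pkt at *pos and advances the position.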
+	for index := uint(16); index >= 1; index-- {
+		Attribute := 1 << (index - 1)
+		reqAttribute := Attribute & AttributesMask
+
+		if reqAttribute != 0 {
+			pkt, _ = ANIGAttributeHandlers[AniGAttributes(reqAttribute)](pos, pkt)
+		}
+	}
+
+	pkt[8] = 0x00 // Command Processed Successfully
+	pkt[9] = uint8(AttributesMask >> 8)
+	pkt[10] = uint8(AttributesMask & 0x00FF)
+
+	return pkt, nil
+
+}
+
+
+func GetSRIndication(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x01
+	*pos++
+	return pkt, nil
+}
+
+func GetOpticalSignalLevel(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0xd7
+	*pos++
+	pkt[*pos] = 0xa9
+	*pos++
+	return pkt, nil
+}
+
+func GetTotalTcontNumber(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x08
+	*pos++
+	return pkt, nil
+}
+
+func GetGEMBlockLength(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	pkt[*pos] = 0x30
+	*pos++ // advance past the second byte so later attributes are not overwritten
+	return pkt, nil
+}
+
+func GetPiggybackDBAReporting(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	return pkt, nil
+}
+
+func GetWholeONTDBAReporting(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	return pkt, nil
+}
+
+func GetUpperOpticalThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0xff
+	*pos++
+	return pkt, nil
+}
+
+func GetSFThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x03
+	*pos++
+	return pkt, nil
+}
+
+func GetSDThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x05
+	*pos++
+	return pkt, nil
+}
+
+func GetARC(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	return pkt, nil
+}
+
+func GetARCInterval(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	return pkt, nil
+}
+
+func GetONTResponseTime(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x00
+	*pos++
+	pkt[*pos] = 0x00
+	*pos++
+	return pkt, nil
+}
+
+func GetLowerOpticalThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0xff
+	*pos++
+	return pkt, nil
+}
+
+func GetTransmitOpticalLeval(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x07
+	*pos++
+	pkt[*pos] = 0x1e
+	*pos++
+	return pkt, nil
+}
+
+func GetLowerTransmitPowerThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x81
+	*pos++
+	return pkt, nil
+}
+
+func GetUpperTransmitPowerThreshold(pos *uint, pkt []byte) ([]byte, error) {
+	pkt[*pos] = 0x81
+	*pos++
+	return pkt, nil
+}
diff --git a/vendor/github.com/opencord/omci-sim/omci_common.go b/vendor/github.com/opencord/omci-sim/omci_common.go
new file mode 100644
index 0000000..c878780
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_common.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+import (
+	"fmt"
+	log "github.com/sirupsen/logrus"
+)
+
+type OmciError struct {
+	Msg string
+}
+
+func (e *OmciError) Error() string {
+	return fmt.Sprintf("%s", e.Msg)
+}
+
+type OnuKey struct {
+	IntfId, OnuId uint32
+}
+
+func (k OnuKey) String() string {
+	return fmt.Sprintf("Onu {intfid:%d, onuid:%d}", k.IntfId, k.OnuId)
+}
+
+func GetAttributes(class OmciClass, content OmciContent, key OnuKey, pkt []byte) []byte {
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("GetAttributes() invoked")
+
+	switch class {
+	case ANIG:
+		pos := uint(11)
+		pkt, _ = GetANIGAttributes(&pos, pkt, content)
+		return pkt
+
+	case EthernetPMHistoryData:
+		pos := uint(11)
+		pkt, _ = GetEthernetPMHistoryDataAttributes(&pos, pkt, content)
+		return pkt
+	default:
+		// For unimplemented MEs, just fill in the attribute mask and return 0 values for the requested attributes
+		// TODO implement Get for unimplemented MEs as well
+		log.WithFields(log.Fields{
+			"IntfId": key.IntfId,
+			"OnuId": key.OnuId,
+			"class": class,
+		}).Tracef("Unimplemeted GetAttributes for ME Class: %v " +
+		    "Filling with zero value for the requested attributes", class)
+		AttributesMask := getAttributeMask(content)
+		pkt[8] = 0x00 // Command Processed Successfully
+		pkt[9] = uint8(AttributesMask >> 8)
+		pkt[10] = uint8(AttributesMask & 0xFF)
+
+		return pkt
+	}
+}
+
+func getAttributeMask(content OmciContent) int {
+	// mask is present in pkt[8] and pkt[9]
+	log.WithFields(log.Fields{
+		"content[0]": content[0],
+		"content[1]": content[1],
+	}).Tracef("GetAttributeMask() invoked")
+	return (int(content[0]) << 8) | int(content[1])
+}
diff --git a/vendor/github.com/opencord/omci-sim/omci_defs.go b/vendor/github.com/opencord/omci-sim/omci_defs.go
new file mode 100644
index 0000000..44347ef
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_defs.go
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	log "github.com/sirupsen/logrus"
+)
+
+// OMCI Sim definitions
+
+type ChMessageType int
+
+const (
+	GemPortAdded ChMessageType = iota
+)
+
+func (m ChMessageType) String() string {
+	names := [...]string{
+		"GemPortAdded",
+	}
+	return names[m]
+}
+
+type OmciChMessageData struct {
+	IntfId 	uint32
+	OnuId   uint32
+}
+
+type OmciChMessage struct {
+	Type ChMessageType
+	Data OmciChMessageData
+}
+
+//
+// OMCI definitions
+//
+
+// OmciMsgType represents a OMCI message-type
+type OmciMsgType byte
+
+func (t OmciMsgType) PrettyPrint() string {
+	switch t {
+	case Create:
+		return "Create"
+	case Delete:
+		return "Delete"
+	case Set:
+		return "Set"
+	case Get:
+		return "Get"
+	case GetAllAlarms:
+		return "GetAllAlarms"
+	case GetAllAlarmsNext:
+		return "GetAllAlarmsNext"
+	case MibUpload:
+		return "MibUpload"
+	case MibUploadNext:
+		return "MibUploadNext"
+	case MibReset:
+		return "MibReset"
+	case AlarmNotification:
+		return "AlarmNotification"
+	case AttributeValueChange:
+		return "AttributeValueChange"
+	case Test:
+		return "Test"
+	case StartSoftwareDownload:
+		return "StartSoftwareDownload"
+	case DownloadSection:
+		return "DownloadSection"
+	case EndSoftwareDownload:
+		return "EndSoftwareDownload"
+	case ActivateSoftware:
+		return "ActivateSoftware"
+	case CommitSoftware:
+		return "CommitSoftware"
+	case SynchronizeTime:
+		return "SynchronizeTime"
+	case Reboot:
+		return "Reboot"
+	case GetNext:
+		return "GetNext"
+	case TestResult:
+		return "TestResult"
+	case GetCurrentData:
+		return "GetCurrentData"
+	case SetTable:
+		return "SetTable"
+	default:
+		// FIXME
+		// msg="Cant't convert state 68 to string"
+		// msg="Cant't convert state 72 to string"
+		// msg="Cant't convert state 73 to string"
+		// msg="Cant't convert state 75 to string"
+		// msg="Cant't convert state 76 to string"
+		// msg="Cant't convert state 77 to string"
+		// msg="Cant't convert state 78 to string"
+		// msg="Cant't convert state 79 to string"
+		// msg="Cant't convert state 88 to string"
+
+		log.Tracef("Can't convert OmciMsgType %v to string", t)
+		return fmt.Sprintf("%d", t)
+	}
+}
+
+const (
+	// Message Types
+	_                                 = iota
+	Create                OmciMsgType = 4
+	Delete                OmciMsgType = 6
+	Set                   OmciMsgType = 8
+	Get                   OmciMsgType = 9
+	GetAllAlarms          OmciMsgType = 11
+	GetAllAlarmsNext      OmciMsgType = 12
+	MibUpload             OmciMsgType = 13
+	MibUploadNext         OmciMsgType = 14
+	MibReset              OmciMsgType = 15
+	AlarmNotification     OmciMsgType = 16
+	AttributeValueChange  OmciMsgType = 17
+	Test                  OmciMsgType = 18
+	StartSoftwareDownload OmciMsgType = 19
+	DownloadSection       OmciMsgType = 20
+	EndSoftwareDownload   OmciMsgType = 21
+	ActivateSoftware      OmciMsgType = 22
+	CommitSoftware        OmciMsgType = 23
+	SynchronizeTime       OmciMsgType = 24
+	Reboot                OmciMsgType = 25
+	GetNext               OmciMsgType = 26
+	TestResult            OmciMsgType = 27
+	GetCurrentData        OmciMsgType = 28
+	SetTable              OmciMsgType = 29 // Defined in Extended Message Set Only
+)
+
+
+// OMCI Managed Entity Class
+type OmciClass uint16
+
+func (c OmciClass) PrettyPrint() string {
+	switch c {
+	case EthernetPMHistoryData:
+		return "EthernetPMHistoryData"
+	case ONUG:
+		return "ONUG"
+	case ANIG:
+		return "ANIG"
+	case GEMPortNetworkCTP:
+		return "GEMPortNetworkCTP"
+	default:
+		log.Tracef("Can't convert OmciClass %v to string", c)
+		return fmt.Sprintf("%d", c)
+	}
+}
+
+const (
+	// Managed Entity Class values
+	EthernetPMHistoryData OmciClass = 24
+	ONUG                  OmciClass = 256
+	ANIG                  OmciClass = 263
+	GEMPortNetworkCTP     OmciClass = 268
+)
+
+// OMCI Message Identifier
+type OmciMessageIdentifier struct {
+	Class    OmciClass
+	Instance uint16
+}
+
+type OmciContent [32]byte
+
+type OmciMessage struct {
+	TransactionId uint16
+	MessageType   OmciMsgType
+	DeviceId      uint8
+	MessageId     OmciMessageIdentifier
+	Content       OmciContent
+}
+
+func ParsePkt(pkt []byte) (uint16, uint8, OmciMsgType, OmciClass, uint16, OmciContent, error) {
+	var m OmciMessage
+
+	r := bytes.NewReader(pkt)
+
+	if err := binary.Read(r, binary.BigEndian, &m); err != nil {
+		log.WithFields(log.Fields{
+			"Packet": pkt,
+			"omciMsg": fmt.Sprintf("%x", pkt),
+		}).Errorf("Failed to read packet: %s", err)
+		return 0, 0, 0, 0, 0, OmciContent{}, errors.New("Failed to read packet")
+	}
+	/*    Message Type = Set
+	      0... .... = Destination Bit: 0x0
+	      .1.. .... = Acknowledge Request: 0x1
+	      ..0. .... = Acknowledgement: 0x0
+	      ...0 1000 = Message Type: Set (8)
+	*/
+
+	log.WithFields(log.Fields{
+		"TransactionId": m.TransactionId,
+		"MessageType": m.MessageType.PrettyPrint(),
+		"MeClass": m.MessageId.Class,
+		"MeInstance": m.MessageId.Instance,
+		"Content": m.Content,
+		"Packet": pkt,
+	}).Tracef("Parsing OMCI Packet")
+
+	return m.TransactionId, m.DeviceId, m.MessageType & 0x1F, m.MessageId.Class, m.MessageId.Instance, m.Content, nil
+}
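+
+// Editor's sketch of the layout that binary.Read imposes above (big-endian, no padding):
+//   bytes 0-1   TransactionId
+//   byte  2     MessageType (bit 6 = acknowledge request; the low five bits carry
+//               the type, hence the "& 0x1F" applied on return)
+//   byte  3     DeviceId
+//   bytes 4-5   MessageId.Class
+//   bytes 6-7   MessageId.Instance
+//   bytes 8-39  Content (32 bytes)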
diff --git a/vendor/github.com/opencord/omci-sim/omci_handlers.go b/vendor/github.com/opencord/omci-sim/omci_handlers.go
new file mode 100644
index 0000000..67a57e7
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_handlers.go
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	log "github.com/sirupsen/logrus"
+)
+
+type OmciMsgHandler func(class OmciClass, content OmciContent, key OnuKey) ([]byte, error)
+
+var Handlers = map[OmciMsgType]OmciMsgHandler{
+	MibReset:         mibReset,
+	MibUpload:        mibUpload,
+	MibUploadNext:    mibUploadNext,
+	Set:              set,
+	Create:           create,
+	Get:              get,
+	GetAllAlarms:     getAllAlarms,
+	GetAllAlarmsNext: getAllAlarmsNext,
+	SynchronizeTime:  syncTime,
+	Delete:           delete,
+	Reboot:           reboot,
+}
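+// Editor's note: each handler below returns a 48-byte response template; OmciSim
+// (omci_sim.go) then stamps bytes 0-3 with the transaction id, message type and
+// device id, and for most message types bytes 4-8 with class, instance and result,
+// before handing the packet back to the caller.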
+
+func mibReset(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci MibReset")
+	OnuOmciStateMapLock.RLock()
+	if state, ok := OnuOmciStateMap[key]; ok {
+		log.WithFields(log.Fields{
+			"IntfId": key.IntfId,
+			"OnuId": key.OnuId,
+		}).Tracef("Resetting OnuOmciState")
+		state.ResetOnuOmciState()
+	}
+	OnuOmciStateMapLock.RUnlock()
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	return pkt, nil
+}
+
+func mibUpload(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci MibUpload")
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	pkt[8] = NumMibUploadsHigherByte
+	pkt[9] = NumMibUploadsLowerByte
+
+	return pkt, nil
+}
+
+func mibUploadNext(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+	OnuOmciStateMapLock.RLock()
+	state := OnuOmciStateMap[key]
+	OnuOmciStateMapLock.RUnlock()
+	// commandNumber is the "Command number" attribute received in "MIB Upload Next" OMCI message
+	commandNumber := (uint16(content[1])) | (uint16(content[0])<<8)
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+		"CommandNumber": commandNumber,
+	}).Tracef("Omci MibUploadNext")
+
+	switch commandNumber {
+	case 0:
+		// ONT Data (2)
+		// log.Println("ONT DATA")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x02, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 1:
+		// Circuit Pack (6) - #1
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x01, 0xf0, 0x00, 0x2f, 0x04,
+			0x49, 0x53, 0x4b, 0x54, 0x71, 0xe8, 0x00, 0x80,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 2:
+		// Circuit Pack (6) - #2
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x01, 0x0f, 0x00, 0x42, 0x52,
+			0x43, 0x4d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 3:
+		// Circuit Pack (6) - #3
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x01, 0x00, 0xf8, 0x20, 0x20,
+			0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+			0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+			0x20, 0x20, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 4:
+		// Circuit Pack (6) - #4
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x01, 0x00, 0x04, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 5:
+		// Circuit Pack (6) - #5
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x80, 0xf0, 0x00, 0xee, 0x01,
+			0x49, 0x53, 0x4b, 0x54, 0x71, 0xe8, 0x00, 0x80,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 6:
+		// Circuit Pack (6) - #6
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x80, 0x0f, 0x00, 0x42, 0x52,
+			0x43, 0x4d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 7:
+		// Circuit Pack (6) - #7
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x80, 0x00, 0xf8, 0x20, 0x20,
+			0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+			0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+			0x20, 0x20, 0x00, 0x08, 0x40, 0x10, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 8:
+		// Circuit Pack (6) - #8
+		// log.Println("Circuit Pack")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x06, 0x01, 0x80, 0x00, 0x04, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 9, 10, 11, 12:
+		// PPTP (11)
+		// log.Println("PPTP")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x00, 0x0b, 0x01, 0x01, 0xff, 0xfe, 0x00, 0x2f,
+			0x00, 0x00, 0x00, 0x00, 0x03, 0x05, 0xee, 0x00,
+			0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+		pkt[11] = state.pptpInstance // ME Instance
+		state.pptpInstance++
+	case 13, 14, 15, 16, 17, 18, 19, 20:
+		// T-CONT (262)
+		// log.Println("T-CONT")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x06, 0x80, 0x00, 0xe0, 0x00, 0xff, 0xff,
+			0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+		state.tcontInstance++
+		pkt[11] = state.tcontInstance // TCONT ME Instance
+
+	case 21:
+		// ANI-G (263)
+		// log.Println("ANI-G")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x07, 0x80, 0x01, 0xff, 0xff, 0x01, 0x00,
+			0x08, 0x00, 0x30, 0x00, 0x00, 0x05, 0x09, 0x00,
+			0x00, 0xe0, 0x54, 0xff, 0xff, 0x00, 0x00, 0x0c,
+			0x63, 0x81, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	case 22, 23, 24, 25:
+		// UNI-G (264)
+		// log.Println("UNI-G")
+		pkt = []byte{
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x08, 0x01, 0x01, 0xf8, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+		pkt[11] = state.uniGInstance // UNI-G ME Instance
+		state.uniGInstance++
+
+	case 26,  30,  34,  38,  42, 46, 50, 54,
+	     58,  62,  66,  70,  74, 78, 82, 86,
+	     90,  94,  98,  102, 106, 110, 114, 118,
+	     122, 126, 130, 134, 138, 142, 146, 150,
+	     154, 158, 162, 166, 170, 174, 178, 182,
+	     186, 190, 194, 198, 202, 206, 210, 214,
+	     218, 222, 226, 230, 234, 238, 242, 246,
+	     250, 254, 258, 262, 266, 270, 274, 278:
+		// Prior-Q with mask downstream
+		log.Tracef("Mib-upload for prior-q with mask")
+		// For downstream PQ, pkt[10] is 0x00
+		// So the instanceId will be like 0x0001, 0x0002,... etc
+		pkt = []byte{
+			0x00, 0x42, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x15, 0x00, 0x00, 0x00, 0x0f, 0xff, 0xff,
+			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+		state.priorQInstance++
+		pkt[11] = state.priorQInstance
+
+	case 27,  31,  35,  39,  43,  47, 51, 55,
+	     59,  63,  67,  71,  75,  79, 83, 87,
+	     91,  95,  99,  103, 107, 111, 115, 119,
+	     123, 127, 131, 135, 139, 143, 147, 151,
+	     155, 159, 163, 167, 171, 175, 179, 183,
+	     187, 191, 195, 199, 203, 207, 211, 215,
+	     219, 223, 227, 231, 235, 239, 243, 247,
+	     251, 255, 259, 263, 267, 271, 275, 279:
+		// Prior-Q with attribute list downstream
+		// log.Println("Mib-upload for prior-q with attribute list")
+		pkt = []byte{
+			0x00, 0x43, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x15, 0x00, 0x00, 0xff, 0xf0, 0x00, 0x01,
+			0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+			0x20, 0x00, 0x00, 0x01, 0x20, 0x01, 0x00, 0x01,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+
+		pkt[11] = state.priorQInstance
+
+		// Each group of 8 PriorQs points to a particular TCONT
+		// Priority within each group of PriorQs runs from 0 to 7
+		if state.priorQPriority % NumPriorQPerTcont == 0 {
+			state.tcontPointer++
+		}
+
+		// Only for verification. To be removed
+		if state.tcontPointer > state.tcontInstance {
+			log.Tracef("Error: Invalid TcontPointer")
+			break
+		}
+
+		pkt[24] = state.tcontPointer
+		pkt[26] = state.priorQPriority % NumPriorQPerTcont
+		pkt[28] = state.tcontInstance
+
+	case 28,  32,  36,  40,  44, 48, 52, 56,
+	     60,  64,  68,  72,  76, 80, 84, 88,
+	     92,  96,  100, 104, 108, 112, 116, 120,
+	     124, 128, 132, 136, 140, 144, 148, 152,
+	     156, 160, 164, 168, 172, 176, 180, 184,
+	     188, 192, 196, 200, 204, 208, 212, 216,
+	     220, 224, 228, 232, 236, 240, 244, 248,
+	     252, 256, 260, 264, 268, 272, 276, 280:
+		// Prior-Q with mask upstream
+		// log.Println("Mib-upload for prior-q with mask")
+		pkt = []byte{
+			0x00, 0x42, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x15, 0x80, 0x00, 0x00, 0x0f, 0xff, 0xff,
+			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+		pkt[11] = state.priorQInstance
+
+	case 29,  33,  37, 41, 45, 49, 53, 57,
+	     61,  65,  69, 73, 77, 81, 85, 89,
+	     93, 97,  101, 105, 109, 113, 117, 121,
+	     125, 129, 133, 137, 141, 145, 149, 153,
+	     157, 161, 165, 169, 173, 177, 181, 185,
+	     189, 193, 197, 201, 205, 209, 213, 217,
+	     221, 225, 229, 233, 237, 241, 245, 249,
+	     253, 257, 261, 265, 269, 273, 277, 281:
+		// Prior-Q with attribute list upstream
+		// log.Println("Mib-upload for prior-q with attribute list")
+		// For upstream PQ, pkt[10] is fixed at 0x80
+		// So the upstream instanceId will be 0x8001, 0x8002, ... etc.
+		pkt = []byte{
+			0x00, 0x43, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x15, 0x80, 0x00, 0xff, 0xf0, 0x00, 0x01,
+			0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+			0x20, 0x00, 0x00, 0x80, 0x20, 0x01, 0x00, 0x01,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+
+		pkt[11] = state.priorQInstance
+		pkt[24] = state.tcontPointer
+		pkt[26] = state.priorQPriority % NumPriorQPerTcont // Priority of this PriorQ
+		pkt[28] = state.tcontInstance
+		state.priorQPriority++
+
+
+	case 282, 283, 284, 285, 286, 287, 288, 289:
+		// Traffic Scheduler
+		// log.Println("Traffic Scheduler")
+		pkt = []byte{
+			0x02, 0xa4, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x16, 0x80, 0x00, 0xf0, 0x00, 0x80, 0x00,
+			0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+
+		pkt[15] = state.tcontInstance
+		state.tcontInstance++
+
+	case 290:
+		// ONT-2G
+		// log.Println("ONT-2G")
+		pkt = []byte{
+			0x00, 0x16, 0x2e, 0x0a, 0x00, 0x02, 0x00, 0x00,
+			0x01, 0x01, 0x00, 0x00, 0x07, 0xfc, 0x00, 0x40,
+			0x08, 0x01, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x7f, 0x00, 0x00, 0x3f, 0x00, 0x01, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		}
+
+	default:
+		state.extraMibUploadCtr++
+		errstr := fmt.Sprintf("%v - Invalid MibUpload request: %d, extras: %d", key, state.mibUploadCtr, state.extraMibUploadCtr)
+		return nil, errors.New(errstr)
+	}
+
+	state.mibUploadCtr++
+	return pkt, nil
+}
+
+func set(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci Set")
+
+	return pkt, nil
+}
+
+func create(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	if class == GEMPortNetworkCTP {
+		OnuOmciStateMapLock.RLock()
+		defer OnuOmciStateMapLock.RUnlock()
+		if onuOmciState, ok := OnuOmciStateMap[key]; !ok {
+			log.WithFields(log.Fields{
+				"IntfId": key.IntfId,
+				"OnuId": key.OnuId,
+			}).Tracef("ONU Key Error")
+			return nil, errors.New("ONU Key Error")
+		} else {
+			onuOmciState.gemPortId = binary.BigEndian.Uint16(content[:2])
+			log.WithFields(log.Fields{
+				"IntfId": key.IntfId,
+				"OnuId": key.OnuId,
+			}).Tracef("Gem Port Id %d", onuOmciState.gemPortId)
+			// FIXME
+			OnuOmciStateMap[key].state = DONE
+			omciCh <- OmciChMessage{
+				Type: GemPortAdded,
+				Data: OmciChMessageData{
+					OnuId: key.OnuId,
+					IntfId: key.IntfId,
+				},
+			}
+		}
+	}
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x01,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci Create")
+
+	return pkt, nil
+}
+
+func get(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x02, 0x01,
+		0x00, 0x20, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	pkt = GetAttributes(class, content, key, pkt)
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci Get")
+	return pkt, nil
+}
+
+func getAllAlarms(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+		0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci GetAllAlarms")
+
+	return pkt, nil
+}
+
+func syncTime(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci syncTime")
+
+	return pkt, nil
+}
+
+func getAllAlarmsNext(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci GetAllAlarmsNext")
+
+	return pkt, nil
+}
+
+func delete(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x0b, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci Delete")
+
+	return pkt, nil
+}
+
+func reboot(class OmciClass, content OmciContent, key OnuKey) ([]byte, error) {
+	var pkt []byte
+	pkt = []byte{
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+
+	log.WithFields(log.Fields{
+		"IntfId": key.IntfId,
+		"OnuId": key.OnuId,
+	}).Tracef("Omci Reboot")
+	return pkt, nil
+}
+
diff --git a/vendor/github.com/opencord/omci-sim/omci_mib.go b/vendor/github.com/opencord/omci-sim/omci_mib.go
new file mode 100644
index 0000000..b8d2cdc
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_mib.go
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+const NumMibUploadsHigherByte byte = 0x01
+const NumMibUploadsLowerByte byte = 0x23
+const NumPriorQPerTcont = 0x08 // NumPriorQPerTcont is the number of priority queues associated with a single tcont
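+
+// Editor's note: the two NumMibUploads bytes together encode 0x0123 (291), which
+// matches the 291 MIB Upload Next responses (command numbers 0 through 290)
+// generated by mibUploadNext in omci_handlers.go.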
+
diff --git a/vendor/github.com/opencord/omci-sim/omci_pm_history_data.go b/vendor/github.com/opencord/omci-sim/omci_pm_history_data.go
new file mode 100644
index 0000000..60ddd80
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_pm_history_data.go
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+type PerformanceMonitoringHistoryData int
+
+const (
+	_                                                                = iota
+	IntervalEndTime                 PerformanceMonitoringHistoryData = 0x8000
+	ThresholdDataId                 PerformanceMonitoringHistoryData = 0x4000
+	FCSErrors                       PerformanceMonitoringHistoryData = 0x2000
+	ExcessiveCollisionCounter       PerformanceMonitoringHistoryData = 0x1000
+	LateCollisionCounter            PerformanceMonitoringHistoryData = 0x0800
+	FrameTooLong                    PerformanceMonitoringHistoryData = 0x0400
+	BufferOverflowOnReceive         PerformanceMonitoringHistoryData = 0x0200
+	BufferOverflowOnTransmit        PerformanceMonitoringHistoryData = 0x0100
+	SingleCollisionFrameCounter     PerformanceMonitoringHistoryData = 0x0080
+	MultipleCollisionFrameCounter   PerformanceMonitoringHistoryData = 0x0040
+	SQECounter                      PerformanceMonitoringHistoryData = 0x0020
+	DeferredTransmissionCounter     PerformanceMonitoringHistoryData = 0x0010
+	InternalMACTransmitErrorCounter PerformanceMonitoringHistoryData = 0x0008
+	CarrierSenseErrorCounter        PerformanceMonitoringHistoryData = 0x0004
+	AllignmentErrorCounter          PerformanceMonitoringHistoryData = 0x0002
+	InternalMACReceiveErrorCounter  PerformanceMonitoringHistoryData = 0x0001
+)
+
+type PMHistoryAttributeHandler func(*uint, []byte) ([]byte, error)
+
+var PMHistoryAttributeHandlers = map[PerformanceMonitoringHistoryData]PMHistoryAttributeHandler{
+	IntervalEndTime:                 GetIntervalEndTime,
+	ThresholdDataId:                 GetThresholdDataId,
+	FCSErrors:                       GetFCSErrors,
+	ExcessiveCollisionCounter:       GetExcessiveCollisionCounter,
+	LateCollisionCounter:            GetLateCollisionCounter,
+	FrameTooLong:                    GetFrameTooLong,
+	BufferOverflowOnReceive:         GetBufferOverflowOnReceive,
+	BufferOverflowOnTransmit:        GetBufferOverflowOnTransmit,
+	SingleCollisionFrameCounter:     GetSingleCollisionFrameCounter,
+	MultipleCollisionFrameCounter:   GetMultipleCollisionFrameCounter,
+	SQECounter:                      GetSQECounter,
+	DeferredTransmissionCounter:     GetDeferredTransmissionCounter,
+	InternalMACTransmitErrorCounter: GetInternalMACTransmitErrorCounter,
+	CarrierSenseErrorCounter:        GetCarrierSenseErrorCounter,
+	AllignmentErrorCounter:          GetAllignmentErrorCounter,
+	InternalMACReceiveErrorCounter:  GetInternalMACReceiveErrorCounter,
+}
+
+func GetEthernetPMHistoryDataAttributes(pos *uint, pkt []byte, content OmciContent) ([]byte, error) {
+	AttributesMask := getAttributeMask(content)
+
+	for index := uint(16); index >= 1; index-- {
+		Attribute := 1 << (index - 1)
+		reqAttribute := Attribute & AttributesMask
+
+		if reqAttribute != 0 {
+			pkt, _ = PMHistoryAttributeHandlers[PerformanceMonitoringHistoryData(reqAttribute)](pos, pkt)
+		}
+	}
+
+	pkt[8] = 0x00 // Command Processed Successfully
+	pkt[9] = uint8(AttributesMask >> 8)
+	pkt[10] = uint8(AttributesMask & 0x00FF)
+
+	return pkt, nil
+
+}
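+
+// Editor's sketch: the loop above walks the requested attribute mask from bit 16
+// (0x8000, attribute 1 / IntervalEndTime) down to bit 1 (0x0001, attribute 16).
+// Each per-attribute getter only advances *pos by the attribute's size, because
+// the simulated counters are all reported as zero.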
+
+func GetIntervalEndTime(pos *uint, pkt []byte) ([]byte, error) {
+	// On real hardware all of these attributes are observed to be 0x00.
+	// The per-attribute functions are nevertheless kept so that specific values can be returned in the future if required.
+	*pos++
+	return pkt, nil
+}
+
+func GetThresholdDataId(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetFCSErrors(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetExcessiveCollisionCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetLateCollisionCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetFrameTooLong(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetBufferOverflowOnReceive(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetBufferOverflowOnTransmit(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetSingleCollisionFrameCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetMultipleCollisionFrameCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetSQECounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetDeferredTransmissionCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetInternalMACTransmitErrorCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetCarrierSenseErrorCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetAllignmentErrorCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+func GetInternalMACReceiveErrorCounter(pos *uint, pkt []byte) ([]byte, error) {
+	*pos++
+	*pos++
+	*pos++
+	*pos++
+	return pkt, nil
+}
+
+
diff --git a/vendor/github.com/opencord/omci-sim/omci_sim.go b/vendor/github.com/opencord/omci-sim/omci_sim.go
new file mode 100644
index 0000000..a792911
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_sim.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package core
+
+import (
+	"fmt"
+	log "github.com/sirupsen/logrus"
+)
+
+var omciCh = make(chan OmciChMessage)
+
+func GetChannel() chan OmciChMessage {
+	return omciCh
+}
+
+func OmciSim(intfId uint32, onuId uint32, request []byte) ([]byte, error) {
+	var resp []byte
+
+	transactionId, deviceId, msgType, class, instance, content, err := ParsePkt(request)
+	if err != nil {
+		log.WithFields(log.Fields{
+			"IntfId": intfId,
+			"OnuId": onuId,
+		}).Errorf("Cannot parse OMCI msg")
+		return resp, &OmciError{"Cannot parse OMCI msg"}
+	}
+
+	log.WithFields(log.Fields{
+		"IntfId": intfId,
+		"OnuId": onuId,
+		"TransactionId": transactionId,
+		"MessageType": msgType.PrettyPrint(),
+		"MeClass": class,
+		"MeInstance": instance,
+		//"Content": content,
+		"omciMsg": fmt.Sprintf("%x", content),
+	}).Tracef("Processing OMCI packet")
+
+	key := OnuKey{intfId, onuId}
+	OnuOmciStateMapLock.Lock()
+	if _, ok := OnuOmciStateMap[key]; !ok {
+		OnuOmciStateMap[key] = NewOnuOmciState()
+	}
+	OnuOmciStateMapLock.Unlock()
+
+	if _, ok := Handlers[msgType]; !ok {
+		log.WithFields(log.Fields{
+			"IntfId": intfId,
+			"OnuId": onuId,
+			"msgType": msgType,
+		}).Errorf("Ignoring omci msg (msgType %d not handled)", msgType)
+		return resp, &OmciError{"Unimplemented omci msg"}
+	}
+
+	resp, err = Handlers[msgType](class, content, key)
+	if err != nil {
+		log.WithFields(log.Fields{
+			"IntfId": intfId,
+			"OnuId": onuId,
+			"msgType": msgType,
+		}).Errorf("Unable to send a successful response, error: %s", err)
+		return resp, nil
+	}
+
+	// In the OMCI message, first 2-bytes is the Transaction Correlation ID
+	resp[0] = byte(transactionId >> 8)
+	resp[1] = byte(transactionId & 0xFF)
+	resp[2] = 0x2<<4 | byte(msgType) // Upper nibble 0x2 is fixed (0010), lower nibble defines the msg type (i.e., mib-upload, mib-upload-next, etc.)
+	resp[3] = deviceId
+
+	// for create, get and set
+	if ((msgType & 0xFF) != MibUploadNext) && ((msgType & 0xFF) != MibReset) && ((msgType & 0xFF) != MibUpload) {
+		// Common fields for create, get, and set
+		resp[4] = byte(class >> 8)
+		resp[5] = byte(class & 0xFF)
+		resp[6] = byte(instance >> 8)
+		resp[7] = byte(instance & 0xFF)
+		resp[8] = 0 // Result: Command Processed Successfully
+
+		// Hardcoding class specific values for Get
+		if (class == 0x82) && ((msgType & 0x0F) == Get) {
+			resp[9] = 0
+			resp[10] = 0x78
+
+		} else if (class == 0x2F) && ((msgType & 0x0F) == Get) {
+			resp[9] = 0x0F
+			resp[10] = 0xB8
+		} else if (class == 0x138) && ((msgType & 0x0F) == Get) {
+			resp[9] = content[0] // 0xBE
+			resp[10] = 0x00
+		}
+	}
+
+	log.WithFields(log.Fields{
+		"IntfId": intfId,
+		"OnuId": onuId,
+		"msgType": msgType.PrettyPrint(),
+		"omciMsg": fmt.Sprintf("%x", resp),
+	}).Tracef("OMCI-SIM Response")
+
+	return resp, nil
+}
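+
+// Editor's note on the response framing above (illustrative): with
+// transactionId 0x1234 and a Get request (message type 9), the code produces
+// resp[0]=0x12, resp[1]=0x34 and resp[2]=0x29 (acknowledgement nibble 0x2
+// shifted into the upper bits, OR'ed with the message type).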
diff --git a/vendor/github.com/opencord/omci-sim/omci_state.go b/vendor/github.com/opencord/omci-sim/omci_state.go
new file mode 100644
index 0000000..eb923ba
--- /dev/null
+++ b/vendor/github.com/opencord/omci-sim/omci_state.go
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	log "github.com/sirupsen/logrus"
+)
+
+type OnuOmciState struct {
+	gemPortId         uint16
+	mibUploadCtr      uint16
+	extraMibUploadCtr uint16 // this is only for debug purposes, will be removed in the future
+	uniGInstance      uint8
+	tcontInstance     uint8
+	pptpInstance      uint8
+	priorQInstance    uint8 // To assign incrementing value to PQ instance-Id
+	priorQPriority	  uint8 // Priority of the PriorityQueueG (0-7)
+	tcontPointer      uint8 // Tcont Pointer for PriorQ
+	state             istate
+}
+
+type istate int
+
+// TODO - Needs to reflect real ONU/OMCI state
+const (
+	INCOMPLETE istate = iota
+	DONE
+)
+
+var OnuOmciStateMap = map[OnuKey]*OnuOmciState{}
+var OnuOmciStateMapLock = sync.RWMutex{}
+
+func NewOnuOmciState() *OnuOmciState {
+	return &OnuOmciState{gemPortId: 0, mibUploadCtr: 0, uniGInstance: 1, tcontInstance: 0, pptpInstance: 1}
+}
+func (s *OnuOmciState) ResetOnuOmciState() {
+	// Resetting the counters  
+	s.mibUploadCtr = 0
+	s.extraMibUploadCtr = 0
+	s.gemPortId = 0
+	s.uniGInstance = 1
+	s.tcontInstance = 0
+	s.pptpInstance = 1
+	s.tcontPointer = 0
+	s.priorQPriority = 0
+}
+func GetOnuOmciState(intfId uint32, onuId uint32) istate {
+	key := OnuKey{intfId, onuId}
+	OnuOmciStateMapLock.RLock()
+	defer OnuOmciStateMapLock.RUnlock()
+	if onu, ok := OnuOmciStateMap[key]; ok {
+		return onu.state
+	} else {
+		return INCOMPLETE
+	}
+}
+
+func GetGemPortId(intfId uint32, onuId uint32) (uint16, error) {
+	key := OnuKey{intfId, onuId}
+	OnuOmciStateMapLock.RLock()
+	defer OnuOmciStateMapLock.RUnlock()
+	if OnuOmciState, ok := OnuOmciStateMap[key]; ok {
+		if OnuOmciState.state != DONE {
+			errmsg := fmt.Sprintf("ONU {intfid:%d, onuid:%d} - Not DONE (GemportID is not set)", intfId, onuId)
+			return 0, errors.New(errmsg)
+		}
+		return OnuOmciState.gemPortId, nil
+	}
+	errmsg := fmt.Sprintf("ONU {intfid:%d, onuid:%d} - Failed to find a key in OnuOmciStateMap", intfId, onuId)
+	return 0, errors.New(errmsg)
+}
+
+func CheckIsTeo() string {
+	log.Warn("It's TEO!")
+	return "It's TEO!"
+}
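+
+// Editor's usage sketch (illustrative only):
+//   if GetOnuOmciState(intfId, onuId) == DONE {
+//       gemPort, err := GetGemPortId(intfId, onuId) // gemPort is only valid when err == nil
+//       ...
+//   }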
diff --git a/vendor/github.com/opencord/voltha-protos/v2/go/openolt/openolt.pb.go b/vendor/github.com/opencord/voltha-protos/v2/go/openolt/openolt.pb.go
new file mode 100644
index 0000000..ea403af
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v2/go/openolt/openolt.pb.go
@@ -0,0 +1,3973 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/openolt.proto
+
+package openolt
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	tech_profile "github.com/opencord/voltha-protos/v2/go/tech_profile"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// SchedulerConfig from public import voltha_protos/tech_profile.proto
+type SchedulerConfig = tech_profile.SchedulerConfig
+
+// TrafficShapingInfo from public import voltha_protos/tech_profile.proto
+type TrafficShapingInfo = tech_profile.TrafficShapingInfo
+
+// TrafficScheduler from public import voltha_protos/tech_profile.proto
+type TrafficScheduler = tech_profile.TrafficScheduler
+
+// TrafficSchedulers from public import voltha_protos/tech_profile.proto
+type TrafficSchedulers = tech_profile.TrafficSchedulers
+
+// TailDropDiscardConfig from public import voltha_protos/tech_profile.proto
+type TailDropDiscardConfig = tech_profile.TailDropDiscardConfig
+
+// RedDiscardConfig from public import voltha_protos/tech_profile.proto
+type RedDiscardConfig = tech_profile.RedDiscardConfig
+
+// WRedDiscardConfig from public import voltha_protos/tech_profile.proto
+type WRedDiscardConfig = tech_profile.WRedDiscardConfig
+
+// DiscardConfig from public import voltha_protos/tech_profile.proto
+type DiscardConfig = tech_profile.DiscardConfig
+type DiscardConfig_TailDropDiscardConfig = tech_profile.DiscardConfig_TailDropDiscardConfig
+type DiscardConfig_RedDiscardConfig = tech_profile.DiscardConfig_RedDiscardConfig
+type DiscardConfig_WredDiscardConfig = tech_profile.DiscardConfig_WredDiscardConfig
+
+// TrafficQueue from public import voltha_protos/tech_profile.proto
+type TrafficQueue = tech_profile.TrafficQueue
+
+// TrafficQueues from public import voltha_protos/tech_profile.proto
+type TrafficQueues = tech_profile.TrafficQueues
+
+// Direction from public import voltha_protos/tech_profile.proto
+type Direction = tech_profile.Direction
+
+var Direction_name = tech_profile.Direction_name
+var Direction_value = tech_profile.Direction_value
+
+const Direction_UPSTREAM = Direction(tech_profile.Direction_UPSTREAM)
+const Direction_DOWNSTREAM = Direction(tech_profile.Direction_DOWNSTREAM)
+const Direction_BIDIRECTIONAL = Direction(tech_profile.Direction_BIDIRECTIONAL)
+
+// SchedulingPolicy from public import voltha_protos/tech_profile.proto
+type SchedulingPolicy = tech_profile.SchedulingPolicy
+
+var SchedulingPolicy_name = tech_profile.SchedulingPolicy_name
+var SchedulingPolicy_value = tech_profile.SchedulingPolicy_value
+
+const SchedulingPolicy_WRR = SchedulingPolicy(tech_profile.SchedulingPolicy_WRR)
+const SchedulingPolicy_StrictPriority = SchedulingPolicy(tech_profile.SchedulingPolicy_StrictPriority)
+const SchedulingPolicy_Hybrid = SchedulingPolicy(tech_profile.SchedulingPolicy_Hybrid)
+
+// AdditionalBW from public import voltha_protos/tech_profile.proto
+type AdditionalBW = tech_profile.AdditionalBW
+
+var AdditionalBW_name = tech_profile.AdditionalBW_name
+var AdditionalBW_value = tech_profile.AdditionalBW_value
+
+const AdditionalBW_AdditionalBW_None = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_None)
+const AdditionalBW_AdditionalBW_NA = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_NA)
+const AdditionalBW_AdditionalBW_BestEffort = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_BestEffort)
+const AdditionalBW_AdditionalBW_Auto = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_Auto)
+
+// DiscardPolicy from public import voltha_protos/tech_profile.proto
+type DiscardPolicy = tech_profile.DiscardPolicy
+
+var DiscardPolicy_name = tech_profile.DiscardPolicy_name
+var DiscardPolicy_value = tech_profile.DiscardPolicy_value
+
+const DiscardPolicy_TailDrop = DiscardPolicy(tech_profile.DiscardPolicy_TailDrop)
+const DiscardPolicy_WTailDrop = DiscardPolicy(tech_profile.DiscardPolicy_WTailDrop)
+const DiscardPolicy_Red = DiscardPolicy(tech_profile.DiscardPolicy_Red)
+const DiscardPolicy_WRed = DiscardPolicy(tech_profile.DiscardPolicy_WRed)
+
+// InferredAdditionBWIndication from public import voltha_protos/tech_profile.proto
+type InferredAdditionBWIndication = tech_profile.InferredAdditionBWIndication
+
+var InferredAdditionBWIndication_name = tech_profile.InferredAdditionBWIndication_name
+var InferredAdditionBWIndication_value = tech_profile.InferredAdditionBWIndication_value
+
+const InferredAdditionBWIndication_InferredAdditionBWIndication_None = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_None)
+const InferredAdditionBWIndication_InferredAdditionBWIndication_Assured = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_Assured)
+const InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort)
+
+type DeviceInfo_DeviceResourceRanges_Pool_PoolType int32
+
+const (
+	DeviceInfo_DeviceResourceRanges_Pool_ONU_ID     DeviceInfo_DeviceResourceRanges_Pool_PoolType = 0
+	DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID   DeviceInfo_DeviceResourceRanges_Pool_PoolType = 1
+	DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID DeviceInfo_DeviceResourceRanges_Pool_PoolType = 2
+	DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID    DeviceInfo_DeviceResourceRanges_Pool_PoolType = 3
+)
+
+var DeviceInfo_DeviceResourceRanges_Pool_PoolType_name = map[int32]string{
+	0: "ONU_ID",
+	1: "ALLOC_ID",
+	2: "GEMPORT_ID",
+	3: "FLOW_ID",
+}
+
+var DeviceInfo_DeviceResourceRanges_Pool_PoolType_value = map[string]int32{
+	"ONU_ID":     0,
+	"ALLOC_ID":   1,
+	"GEMPORT_ID": 2,
+	"FLOW_ID":    3,
+}
+
+func (x DeviceInfo_DeviceResourceRanges_Pool_PoolType) String() string {
+	return proto.EnumName(DeviceInfo_DeviceResourceRanges_Pool_PoolType_name, int32(x))
+}
+
+func (DeviceInfo_DeviceResourceRanges_Pool_PoolType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0, 0}
+}
+
+type DeviceInfo_DeviceResourceRanges_Pool_SharingType int32
+
+const (
+	DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF           DeviceInfo_DeviceResourceRanges_Pool_SharingType = 0
+	DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH  DeviceInfo_DeviceResourceRanges_Pool_SharingType = 1
+	DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_SAME_TECH DeviceInfo_DeviceResourceRanges_Pool_SharingType = 2
+)
+
+var DeviceInfo_DeviceResourceRanges_Pool_SharingType_name = map[int32]string{
+	0: "DEDICATED_PER_INTF",
+	1: "SHARED_BY_ALL_INTF_ALL_TECH",
+	2: "SHARED_BY_ALL_INTF_SAME_TECH",
+}
+
+var DeviceInfo_DeviceResourceRanges_Pool_SharingType_value = map[string]int32{
+	"DEDICATED_PER_INTF":           0,
+	"SHARED_BY_ALL_INTF_ALL_TECH":  1,
+	"SHARED_BY_ALL_INTF_SAME_TECH": 2,
+}
+
+func (x DeviceInfo_DeviceResourceRanges_Pool_SharingType) String() string {
+	return proto.EnumName(DeviceInfo_DeviceResourceRanges_Pool_SharingType_name, int32(x))
+}
+
+func (DeviceInfo_DeviceResourceRanges_Pool_SharingType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0, 1}
+}
+
+type Indication struct {
+	// Types that are valid to be assigned to Data:
+	//	*Indication_OltInd
+	//	*Indication_IntfInd
+	//	*Indication_IntfOperInd
+	//	*Indication_OnuDiscInd
+	//	*Indication_OnuInd
+	//	*Indication_OmciInd
+	//	*Indication_PktInd
+	//	*Indication_PortStats
+	//	*Indication_FlowStats
+	//	*Indication_AlarmInd
+	Data                 isIndication_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Indication) Reset()         { *m = Indication{} }
+func (m *Indication) String() string { return proto.CompactTextString(m) }
+func (*Indication) ProtoMessage()    {}
+func (*Indication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{0}
+}
+
+func (m *Indication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Indication.Unmarshal(m, b)
+}
+func (m *Indication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Indication.Marshal(b, m, deterministic)
+}
+func (m *Indication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Indication.Merge(m, src)
+}
+func (m *Indication) XXX_Size() int {
+	return xxx_messageInfo_Indication.Size(m)
+}
+func (m *Indication) XXX_DiscardUnknown() {
+	xxx_messageInfo_Indication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Indication proto.InternalMessageInfo
+
+type isIndication_Data interface {
+	isIndication_Data()
+}
+
+type Indication_OltInd struct {
+	OltInd *OltIndication `protobuf:"bytes,1,opt,name=olt_ind,json=oltInd,proto3,oneof"`
+}
+
+type Indication_IntfInd struct {
+	IntfInd *IntfIndication `protobuf:"bytes,2,opt,name=intf_ind,json=intfInd,proto3,oneof"`
+}
+
+type Indication_IntfOperInd struct {
+	IntfOperInd *IntfOperIndication `protobuf:"bytes,3,opt,name=intf_oper_ind,json=intfOperInd,proto3,oneof"`
+}
+
+type Indication_OnuDiscInd struct {
+	OnuDiscInd *OnuDiscIndication `protobuf:"bytes,4,opt,name=onu_disc_ind,json=onuDiscInd,proto3,oneof"`
+}
+
+type Indication_OnuInd struct {
+	OnuInd *OnuIndication `protobuf:"bytes,5,opt,name=onu_ind,json=onuInd,proto3,oneof"`
+}
+
+type Indication_OmciInd struct {
+	OmciInd *OmciIndication `protobuf:"bytes,6,opt,name=omci_ind,json=omciInd,proto3,oneof"`
+}
+
+type Indication_PktInd struct {
+	PktInd *PacketIndication `protobuf:"bytes,7,opt,name=pkt_ind,json=pktInd,proto3,oneof"`
+}
+
+type Indication_PortStats struct {
+	PortStats *PortStatistics `protobuf:"bytes,8,opt,name=port_stats,json=portStats,proto3,oneof"`
+}
+
+type Indication_FlowStats struct {
+	FlowStats *FlowStatistics `protobuf:"bytes,9,opt,name=flow_stats,json=flowStats,proto3,oneof"`
+}
+
+type Indication_AlarmInd struct {
+	AlarmInd *AlarmIndication `protobuf:"bytes,10,opt,name=alarm_ind,json=alarmInd,proto3,oneof"`
+}
+
+func (*Indication_OltInd) isIndication_Data() {}
+
+func (*Indication_IntfInd) isIndication_Data() {}
+
+func (*Indication_IntfOperInd) isIndication_Data() {}
+
+func (*Indication_OnuDiscInd) isIndication_Data() {}
+
+func (*Indication_OnuInd) isIndication_Data() {}
+
+func (*Indication_OmciInd) isIndication_Data() {}
+
+func (*Indication_PktInd) isIndication_Data() {}
+
+func (*Indication_PortStats) isIndication_Data() {}
+
+func (*Indication_FlowStats) isIndication_Data() {}
+
+func (*Indication_AlarmInd) isIndication_Data() {}
+
+func (m *Indication) GetData() isIndication_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *Indication) GetOltInd() *OltIndication {
+	if x, ok := m.GetData().(*Indication_OltInd); ok {
+		return x.OltInd
+	}
+	return nil
+}
+
+func (m *Indication) GetIntfInd() *IntfIndication {
+	if x, ok := m.GetData().(*Indication_IntfInd); ok {
+		return x.IntfInd
+	}
+	return nil
+}
+
+func (m *Indication) GetIntfOperInd() *IntfOperIndication {
+	if x, ok := m.GetData().(*Indication_IntfOperInd); ok {
+		return x.IntfOperInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOnuDiscInd() *OnuDiscIndication {
+	if x, ok := m.GetData().(*Indication_OnuDiscInd); ok {
+		return x.OnuDiscInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOnuInd() *OnuIndication {
+	if x, ok := m.GetData().(*Indication_OnuInd); ok {
+		return x.OnuInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOmciInd() *OmciIndication {
+	if x, ok := m.GetData().(*Indication_OmciInd); ok {
+		return x.OmciInd
+	}
+	return nil
+}
+
+func (m *Indication) GetPktInd() *PacketIndication {
+	if x, ok := m.GetData().(*Indication_PktInd); ok {
+		return x.PktInd
+	}
+	return nil
+}
+
+func (m *Indication) GetPortStats() *PortStatistics {
+	if x, ok := m.GetData().(*Indication_PortStats); ok {
+		return x.PortStats
+	}
+	return nil
+}
+
+func (m *Indication) GetFlowStats() *FlowStatistics {
+	if x, ok := m.GetData().(*Indication_FlowStats); ok {
+		return x.FlowStats
+	}
+	return nil
+}
+
+func (m *Indication) GetAlarmInd() *AlarmIndication {
+	if x, ok := m.GetData().(*Indication_AlarmInd); ok {
+		return x.AlarmInd
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Indication) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Indication_OltInd)(nil),
+		(*Indication_IntfInd)(nil),
+		(*Indication_IntfOperInd)(nil),
+		(*Indication_OnuDiscInd)(nil),
+		(*Indication_OnuInd)(nil),
+		(*Indication_OmciInd)(nil),
+		(*Indication_PktInd)(nil),
+		(*Indication_PortStats)(nil),
+		(*Indication_FlowStats)(nil),
+		(*Indication_AlarmInd)(nil),
+	}
+}
+
+type AlarmIndication struct {
+	// Types that are valid to be assigned to Data:
+	//	*AlarmIndication_LosInd
+	//	*AlarmIndication_DyingGaspInd
+	//	*AlarmIndication_OnuAlarmInd
+	//	*AlarmIndication_OnuStartupFailInd
+	//	*AlarmIndication_OnuSignalDegradeInd
+	//	*AlarmIndication_OnuDriftOfWindowInd
+	//	*AlarmIndication_OnuLossOmciInd
+	//	*AlarmIndication_OnuSignalsFailInd
+	//	*AlarmIndication_OnuTiwiInd
+	//	*AlarmIndication_OnuActivationFailInd
+	//	*AlarmIndication_OnuProcessingErrorInd
+	Data                 isAlarmIndication_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *AlarmIndication) Reset()         { *m = AlarmIndication{} }
+func (m *AlarmIndication) String() string { return proto.CompactTextString(m) }
+func (*AlarmIndication) ProtoMessage()    {}
+func (*AlarmIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{1}
+}
+
+func (m *AlarmIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmIndication.Unmarshal(m, b)
+}
+func (m *AlarmIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmIndication.Marshal(b, m, deterministic)
+}
+func (m *AlarmIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmIndication.Merge(m, src)
+}
+func (m *AlarmIndication) XXX_Size() int {
+	return xxx_messageInfo_AlarmIndication.Size(m)
+}
+func (m *AlarmIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmIndication proto.InternalMessageInfo
+
+type isAlarmIndication_Data interface {
+	isAlarmIndication_Data()
+}
+
+type AlarmIndication_LosInd struct {
+	LosInd *LosIndication `protobuf:"bytes,1,opt,name=los_ind,json=losInd,proto3,oneof"`
+}
+
+type AlarmIndication_DyingGaspInd struct {
+	DyingGaspInd *DyingGaspIndication `protobuf:"bytes,2,opt,name=dying_gasp_ind,json=dyingGaspInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuAlarmInd struct {
+	OnuAlarmInd *OnuAlarmIndication `protobuf:"bytes,3,opt,name=onu_alarm_ind,json=onuAlarmInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuStartupFailInd struct {
+	OnuStartupFailInd *OnuStartupFailureIndication `protobuf:"bytes,4,opt,name=onu_startup_fail_ind,json=onuStartupFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuSignalDegradeInd struct {
+	OnuSignalDegradeInd *OnuSignalDegradeIndication `protobuf:"bytes,5,opt,name=onu_signal_degrade_ind,json=onuSignalDegradeInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuDriftOfWindowInd struct {
+	OnuDriftOfWindowInd *OnuDriftOfWindowIndication `protobuf:"bytes,6,opt,name=onu_drift_of_window_ind,json=onuDriftOfWindowInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuLossOmciInd struct {
+	OnuLossOmciInd *OnuLossOfOmciChannelIndication `protobuf:"bytes,7,opt,name=onu_loss_omci_ind,json=onuLossOmciInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuSignalsFailInd struct {
+	OnuSignalsFailInd *OnuSignalsFailureIndication `protobuf:"bytes,8,opt,name=onu_signals_fail_ind,json=onuSignalsFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuTiwiInd struct {
+	OnuTiwiInd *OnuTransmissionInterferenceWarning `protobuf:"bytes,9,opt,name=onu_tiwi_ind,json=onuTiwiInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuActivationFailInd struct {
+	OnuActivationFailInd *OnuActivationFailureIndication `protobuf:"bytes,10,opt,name=onu_activation_fail_ind,json=onuActivationFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuProcessingErrorInd struct {
+	OnuProcessingErrorInd *OnuProcessingErrorIndication `protobuf:"bytes,11,opt,name=onu_processing_error_ind,json=onuProcessingErrorInd,proto3,oneof"`
+}
+
+func (*AlarmIndication_LosInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_DyingGaspInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuAlarmInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuStartupFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuSignalDegradeInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuDriftOfWindowInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuLossOmciInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuSignalsFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuTiwiInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuActivationFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuProcessingErrorInd) isAlarmIndication_Data() {}
+
+func (m *AlarmIndication) GetData() isAlarmIndication_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetLosInd() *LosIndication {
+	if x, ok := m.GetData().(*AlarmIndication_LosInd); ok {
+		return x.LosInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetDyingGaspInd() *DyingGaspIndication {
+	if x, ok := m.GetData().(*AlarmIndication_DyingGaspInd); ok {
+		return x.DyingGaspInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuAlarmInd() *OnuAlarmIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuAlarmInd); ok {
+		return x.OnuAlarmInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuStartupFailInd() *OnuStartupFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuStartupFailInd); ok {
+		return x.OnuStartupFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuSignalDegradeInd() *OnuSignalDegradeIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuSignalDegradeInd); ok {
+		return x.OnuSignalDegradeInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuDriftOfWindowInd() *OnuDriftOfWindowIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuDriftOfWindowInd); ok {
+		return x.OnuDriftOfWindowInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuLossOmciInd() *OnuLossOfOmciChannelIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuLossOmciInd); ok {
+		return x.OnuLossOmciInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuSignalsFailInd() *OnuSignalsFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuSignalsFailInd); ok {
+		return x.OnuSignalsFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuTiwiInd() *OnuTransmissionInterferenceWarning {
+	if x, ok := m.GetData().(*AlarmIndication_OnuTiwiInd); ok {
+		return x.OnuTiwiInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuActivationFailInd() *OnuActivationFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuActivationFailInd); ok {
+		return x.OnuActivationFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuProcessingErrorInd() *OnuProcessingErrorIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuProcessingErrorInd); ok {
+		return x.OnuProcessingErrorInd
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AlarmIndication) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*AlarmIndication_LosInd)(nil),
+		(*AlarmIndication_DyingGaspInd)(nil),
+		(*AlarmIndication_OnuAlarmInd)(nil),
+		(*AlarmIndication_OnuStartupFailInd)(nil),
+		(*AlarmIndication_OnuSignalDegradeInd)(nil),
+		(*AlarmIndication_OnuDriftOfWindowInd)(nil),
+		(*AlarmIndication_OnuLossOmciInd)(nil),
+		(*AlarmIndication_OnuSignalsFailInd)(nil),
+		(*AlarmIndication_OnuTiwiInd)(nil),
+		(*AlarmIndication_OnuActivationFailInd)(nil),
+		(*AlarmIndication_OnuProcessingErrorInd)(nil),
+	}
+}
+
+type OltIndication struct {
+	OperState            string   `protobuf:"bytes,1,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OltIndication) Reset()         { *m = OltIndication{} }
+func (m *OltIndication) String() string { return proto.CompactTextString(m) }
+func (*OltIndication) ProtoMessage()    {}
+func (*OltIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{2}
+}
+
+func (m *OltIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OltIndication.Unmarshal(m, b)
+}
+func (m *OltIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OltIndication.Marshal(b, m, deterministic)
+}
+func (m *OltIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OltIndication.Merge(m, src)
+}
+func (m *OltIndication) XXX_Size() int {
+	return xxx_messageInfo_OltIndication.Size(m)
+}
+func (m *OltIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OltIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OltIndication proto.InternalMessageInfo
+
+func (m *OltIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type IntfIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OperState            string   `protobuf:"bytes,2,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IntfIndication) Reset()         { *m = IntfIndication{} }
+func (m *IntfIndication) String() string { return proto.CompactTextString(m) }
+func (*IntfIndication) ProtoMessage()    {}
+func (*IntfIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{3}
+}
+
+func (m *IntfIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IntfIndication.Unmarshal(m, b)
+}
+func (m *IntfIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IntfIndication.Marshal(b, m, deterministic)
+}
+func (m *IntfIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntfIndication.Merge(m, src)
+}
+func (m *IntfIndication) XXX_Size() int {
+	return xxx_messageInfo_IntfIndication.Size(m)
+}
+func (m *IntfIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntfIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntfIndication proto.InternalMessageInfo
+
+func (m *IntfIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *IntfIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type OnuDiscIndication struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OnuDiscIndication) Reset()         { *m = OnuDiscIndication{} }
+func (m *OnuDiscIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuDiscIndication) ProtoMessage()    {}
+func (*OnuDiscIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{4}
+}
+
+func (m *OnuDiscIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuDiscIndication.Unmarshal(m, b)
+}
+func (m *OnuDiscIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuDiscIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuDiscIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuDiscIndication.Merge(m, src)
+}
+func (m *OnuDiscIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuDiscIndication.Size(m)
+}
+func (m *OnuDiscIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuDiscIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuDiscIndication proto.InternalMessageInfo
+
+func (m *OnuDiscIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuDiscIndication) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+type OnuIndication struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32        `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	OperState            string        `protobuf:"bytes,3,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	AdminState           string        `protobuf:"bytes,5,opt,name=admin_state,json=adminState,proto3" json:"admin_state,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,4,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OnuIndication) Reset()         { *m = OnuIndication{} }
+func (m *OnuIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuIndication) ProtoMessage()    {}
+func (*OnuIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{5}
+}
+
+func (m *OnuIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuIndication.Unmarshal(m, b)
+}
+func (m *OnuIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuIndication.Merge(m, src)
+}
+func (m *OnuIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuIndication.Size(m)
+}
+func (m *OnuIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuIndication proto.InternalMessageInfo
+
+func (m *OnuIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+func (m *OnuIndication) GetAdminState() string {
+	if m != nil {
+		return m.AdminState
+	}
+	return ""
+}
+
+func (m *OnuIndication) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+type IntfOperIndication struct {
+	Type                 string   `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	IntfId               uint32   `protobuf:"fixed32,2,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OperState            string   `protobuf:"bytes,3,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IntfOperIndication) Reset()         { *m = IntfOperIndication{} }
+func (m *IntfOperIndication) String() string { return proto.CompactTextString(m) }
+func (*IntfOperIndication) ProtoMessage()    {}
+func (*IntfOperIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{6}
+}
+
+func (m *IntfOperIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IntfOperIndication.Unmarshal(m, b)
+}
+func (m *IntfOperIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IntfOperIndication.Marshal(b, m, deterministic)
+}
+func (m *IntfOperIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntfOperIndication.Merge(m, src)
+}
+func (m *IntfOperIndication) XXX_Size() int {
+	return xxx_messageInfo_IntfOperIndication.Size(m)
+}
+func (m *IntfOperIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntfOperIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntfOperIndication proto.InternalMessageInfo
+
+func (m *IntfOperIndication) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *IntfOperIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *IntfOperIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type OmciIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciIndication) Reset()         { *m = OmciIndication{} }
+func (m *OmciIndication) String() string { return proto.CompactTextString(m) }
+func (*OmciIndication) ProtoMessage()    {}
+func (*OmciIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{7}
+}
+
+func (m *OmciIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciIndication.Unmarshal(m, b)
+}
+func (m *OmciIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciIndication.Marshal(b, m, deterministic)
+}
+func (m *OmciIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciIndication.Merge(m, src)
+}
+func (m *OmciIndication) XXX_Size() int {
+	return xxx_messageInfo_OmciIndication.Size(m)
+}
+func (m *OmciIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciIndication proto.InternalMessageInfo
+
+func (m *OmciIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OmciIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OmciIndication) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type PacketIndication struct {
+	IntfType             string   `protobuf:"bytes,5,opt,name=intf_type,json=intfType,proto3" json:"intf_type,omitempty"`
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	GemportId            uint32   `protobuf:"fixed32,2,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	FlowId               uint32   `protobuf:"fixed32,3,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	PortNo               uint32   `protobuf:"fixed32,6,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	Cookie               uint64   `protobuf:"fixed64,7,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,4,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PacketIndication) Reset()         { *m = PacketIndication{} }
+func (m *PacketIndication) String() string { return proto.CompactTextString(m) }
+func (*PacketIndication) ProtoMessage()    {}
+func (*PacketIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{8}
+}
+
+func (m *PacketIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketIndication.Unmarshal(m, b)
+}
+func (m *PacketIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketIndication.Marshal(b, m, deterministic)
+}
+func (m *PacketIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketIndication.Merge(m, src)
+}
+func (m *PacketIndication) XXX_Size() int {
+	return xxx_messageInfo_PacketIndication.Size(m)
+}
+func (m *PacketIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketIndication proto.InternalMessageInfo
+
+func (m *PacketIndication) GetIntfType() string {
+	if m != nil {
+		return m.IntfType
+	}
+	return ""
+}
+
+func (m *PacketIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type Interface struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Interface) Reset()         { *m = Interface{} }
+func (m *Interface) String() string { return proto.CompactTextString(m) }
+func (*Interface) ProtoMessage()    {}
+func (*Interface) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{9}
+}
+
+func (m *Interface) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Interface.Unmarshal(m, b)
+}
+func (m *Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Interface.Marshal(b, m, deterministic)
+}
+func (m *Interface) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Interface.Merge(m, src)
+}
+func (m *Interface) XXX_Size() int {
+	return xxx_messageInfo_Interface.Size(m)
+}
+func (m *Interface) XXX_DiscardUnknown() {
+	xxx_messageInfo_Interface.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Interface proto.InternalMessageInfo
+
+func (m *Interface) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+type Heartbeat struct {
+	HeartbeatSignature   uint32   `protobuf:"fixed32,1,opt,name=heartbeat_signature,json=heartbeatSignature,proto3" json:"heartbeat_signature,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Heartbeat) Reset()         { *m = Heartbeat{} }
+func (m *Heartbeat) String() string { return proto.CompactTextString(m) }
+func (*Heartbeat) ProtoMessage()    {}
+func (*Heartbeat) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{10}
+}
+
+func (m *Heartbeat) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Heartbeat.Unmarshal(m, b)
+}
+func (m *Heartbeat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Heartbeat.Marshal(b, m, deterministic)
+}
+func (m *Heartbeat) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Heartbeat.Merge(m, src)
+}
+func (m *Heartbeat) XXX_Size() int {
+	return xxx_messageInfo_Heartbeat.Size(m)
+}
+func (m *Heartbeat) XXX_DiscardUnknown() {
+	xxx_messageInfo_Heartbeat.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Heartbeat proto.InternalMessageInfo
+
+func (m *Heartbeat) GetHeartbeatSignature() uint32 {
+	if m != nil {
+		return m.HeartbeatSignature
+	}
+	return 0
+}
+
+type Onu struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32        `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,3,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	Pir                  uint32        `protobuf:"fixed32,4,opt,name=pir,proto3" json:"pir,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *Onu) Reset()         { *m = Onu{} }
+func (m *Onu) String() string { return proto.CompactTextString(m) }
+func (*Onu) ProtoMessage()    {}
+func (*Onu) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{11}
+}
+
+func (m *Onu) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Onu.Unmarshal(m, b)
+}
+func (m *Onu) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Onu.Marshal(b, m, deterministic)
+}
+func (m *Onu) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Onu.Merge(m, src)
+}
+func (m *Onu) XXX_Size() int {
+	return xxx_messageInfo_Onu.Size(m)
+}
+func (m *Onu) XXX_DiscardUnknown() {
+	xxx_messageInfo_Onu.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Onu proto.InternalMessageInfo
+
+func (m *Onu) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *Onu) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Onu) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+func (m *Onu) GetPir() uint32 {
+	if m != nil {
+		return m.Pir
+	}
+	return 0
+}
+
+type OmciMsg struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciMsg) Reset()         { *m = OmciMsg{} }
+func (m *OmciMsg) String() string { return proto.CompactTextString(m) }
+func (*OmciMsg) ProtoMessage()    {}
+func (*OmciMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{12}
+}
+
+func (m *OmciMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciMsg.Unmarshal(m, b)
+}
+func (m *OmciMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciMsg.Marshal(b, m, deterministic)
+}
+func (m *OmciMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciMsg.Merge(m, src)
+}
+func (m *OmciMsg) XXX_Size() int {
+	return xxx_messageInfo_OmciMsg.Size(m)
+}
+func (m *OmciMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciMsg proto.InternalMessageInfo
+
+func (m *OmciMsg) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OmciMsg) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OmciMsg) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type OnuPacket struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	PortNo               uint32   `protobuf:"fixed32,4,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	GemportId            uint32   `protobuf:"fixed32,5,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuPacket) Reset()         { *m = OnuPacket{} }
+func (m *OnuPacket) String() string { return proto.CompactTextString(m) }
+func (*OnuPacket) ProtoMessage()    {}
+func (*OnuPacket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{13}
+}
+
+func (m *OnuPacket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuPacket.Unmarshal(m, b)
+}
+func (m *OnuPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuPacket.Marshal(b, m, deterministic)
+}
+func (m *OnuPacket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuPacket.Merge(m, src)
+}
+func (m *OnuPacket) XXX_Size() int {
+	return xxx_messageInfo_OnuPacket.Size(m)
+}
+func (m *OnuPacket) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuPacket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuPacket proto.InternalMessageInfo
+
+func (m *OnuPacket) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type UplinkPacket struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,2,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UplinkPacket) Reset()         { *m = UplinkPacket{} }
+func (m *UplinkPacket) String() string { return proto.CompactTextString(m) }
+func (*UplinkPacket) ProtoMessage()    {}
+func (*UplinkPacket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{14}
+}
+
+func (m *UplinkPacket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UplinkPacket.Unmarshal(m, b)
+}
+func (m *UplinkPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UplinkPacket.Marshal(b, m, deterministic)
+}
+func (m *UplinkPacket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UplinkPacket.Merge(m, src)
+}
+func (m *UplinkPacket) XXX_Size() int {
+	return xxx_messageInfo_UplinkPacket.Size(m)
+}
+func (m *UplinkPacket) XXX_DiscardUnknown() {
+	xxx_messageInfo_UplinkPacket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UplinkPacket proto.InternalMessageInfo
+
+func (m *UplinkPacket) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *UplinkPacket) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type DeviceInfo struct {
+	Vendor             string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Model              string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
+	HardwareVersion    string `protobuf:"bytes,3,opt,name=hardware_version,json=hardwareVersion,proto3" json:"hardware_version,omitempty"`
+	FirmwareVersion    string `protobuf:"bytes,4,opt,name=firmware_version,json=firmwareVersion,proto3" json:"firmware_version,omitempty"`
+	DeviceId           string `protobuf:"bytes,16,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DeviceSerialNumber string `protobuf:"bytes,17,opt,name=device_serial_number,json=deviceSerialNumber,proto3" json:"device_serial_number,omitempty"`
+	// Total number of pon intf ports on the device
+	PonPorts uint32 `protobuf:"fixed32,12,opt,name=pon_ports,json=ponPorts,proto3" json:"pon_ports,omitempty"`
+	// If using global per-device technology profile. To be deprecated
+	Technology           string                             `protobuf:"bytes,5,opt,name=technology,proto3" json:"technology,omitempty"`
+	OnuIdStart           uint32                             `protobuf:"fixed32,6,opt,name=onu_id_start,json=onuIdStart,proto3" json:"onu_id_start,omitempty"`
+	OnuIdEnd             uint32                             `protobuf:"fixed32,7,opt,name=onu_id_end,json=onuIdEnd,proto3" json:"onu_id_end,omitempty"`
+	AllocIdStart         uint32                             `protobuf:"fixed32,8,opt,name=alloc_id_start,json=allocIdStart,proto3" json:"alloc_id_start,omitempty"`
+	AllocIdEnd           uint32                             `protobuf:"fixed32,9,opt,name=alloc_id_end,json=allocIdEnd,proto3" json:"alloc_id_end,omitempty"`
+	GemportIdStart       uint32                             `protobuf:"fixed32,10,opt,name=gemport_id_start,json=gemportIdStart,proto3" json:"gemport_id_start,omitempty"`
+	GemportIdEnd         uint32                             `protobuf:"fixed32,11,opt,name=gemport_id_end,json=gemportIdEnd,proto3" json:"gemport_id_end,omitempty"`
+	FlowIdStart          uint32                             `protobuf:"fixed32,13,opt,name=flow_id_start,json=flowIdStart,proto3" json:"flow_id_start,omitempty"`
+	FlowIdEnd            uint32                             `protobuf:"fixed32,14,opt,name=flow_id_end,json=flowIdEnd,proto3" json:"flow_id_end,omitempty"`
+	Ranges               []*DeviceInfo_DeviceResourceRanges `protobuf:"bytes,15,rep,name=ranges,proto3" json:"ranges,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *DeviceInfo) Reset()         { *m = DeviceInfo{} }
+func (m *DeviceInfo) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo) ProtoMessage()    {}
+func (*DeviceInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15}
+}
+
+func (m *DeviceInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo.Unmarshal(m, b)
+}
+func (m *DeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo.Merge(m, src)
+}
+func (m *DeviceInfo) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo.Size(m)
+}
+func (m *DeviceInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo proto.InternalMessageInfo
+
+func (m *DeviceInfo) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetModel() string {
+	if m != nil {
+		return m.Model
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetHardwareVersion() string {
+	if m != nil {
+		return m.HardwareVersion
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetFirmwareVersion() string {
+	if m != nil {
+		return m.FirmwareVersion
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetDeviceSerialNumber() string {
+	if m != nil {
+		return m.DeviceSerialNumber
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetPonPorts() uint32 {
+	if m != nil {
+		return m.PonPorts
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetTechnology() string {
+	if m != nil {
+		return m.Technology
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetOnuIdStart() uint32 {
+	if m != nil {
+		return m.OnuIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetOnuIdEnd() uint32 {
+	if m != nil {
+		return m.OnuIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetAllocIdStart() uint32 {
+	if m != nil {
+		return m.AllocIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetAllocIdEnd() uint32 {
+	if m != nil {
+		return m.AllocIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetGemportIdStart() uint32 {
+	if m != nil {
+		return m.GemportIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetGemportIdEnd() uint32 {
+	if m != nil {
+		return m.GemportIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetFlowIdStart() uint32 {
+	if m != nil {
+		return m.FlowIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetFlowIdEnd() uint32 {
+	if m != nil {
+		return m.FlowIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetRanges() []*DeviceInfo_DeviceResourceRanges {
+	if m != nil {
+		return m.Ranges
+	}
+	return nil
+}
+
+type DeviceInfo_DeviceResourceRanges struct {
+	// List of 0 or more intf_ids that use the same technology and pools.
+	// If 0 intf_ids supplied, it implies ALL interfaces
+	IntfIds []uint32 `protobuf:"fixed32,1,rep,packed,name=intf_ids,json=intfIds,proto3" json:"intf_ids,omitempty"`
+	// Technology profile for this pool
+	Technology           string                                  `protobuf:"bytes,2,opt,name=technology,proto3" json:"technology,omitempty"`
+	Pools                []*DeviceInfo_DeviceResourceRanges_Pool `protobuf:"bytes,3,rep,name=pools,proto3" json:"pools,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                `json:"-"`
+	XXX_unrecognized     []byte                                  `json:"-"`
+	XXX_sizecache        int32                                   `json:"-"`
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) Reset()         { *m = DeviceInfo_DeviceResourceRanges{} }
+func (m *DeviceInfo_DeviceResourceRanges) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo_DeviceResourceRanges) ProtoMessage()    {}
+func (*DeviceInfo_DeviceResourceRanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0}
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Unmarshal(m, b)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Merge(m, src)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Size(m)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo_DeviceResourceRanges proto.InternalMessageInfo
+
+func (m *DeviceInfo_DeviceResourceRanges) GetIntfIds() []uint32 {
+	if m != nil {
+		return m.IntfIds
+	}
+	return nil
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) GetTechnology() string {
+	if m != nil {
+		return m.Technology
+	}
+	return ""
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) GetPools() []*DeviceInfo_DeviceResourceRanges_Pool {
+	if m != nil {
+		return m.Pools
+	}
+	return nil
+}
+
+type DeviceInfo_DeviceResourceRanges_Pool struct {
+	Type                 DeviceInfo_DeviceResourceRanges_Pool_PoolType    `protobuf:"varint,1,opt,name=type,proto3,enum=openolt.DeviceInfo_DeviceResourceRanges_Pool_PoolType" json:"type,omitempty"`
+	Sharing              DeviceInfo_DeviceResourceRanges_Pool_SharingType `protobuf:"varint,2,opt,name=sharing,proto3,enum=openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType" json:"sharing,omitempty"`
+	Start                uint32                                           `protobuf:"fixed32,3,opt,name=start,proto3" json:"start,omitempty"`
+	End                  uint32                                           `protobuf:"fixed32,4,opt,name=end,proto3" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                         `json:"-"`
+	XXX_unrecognized     []byte                                           `json:"-"`
+	XXX_sizecache        int32                                            `json:"-"`
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) Reset()         { *m = DeviceInfo_DeviceResourceRanges_Pool{} }
+func (m *DeviceInfo_DeviceResourceRanges_Pool) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo_DeviceResourceRanges_Pool) ProtoMessage()    {}
+func (*DeviceInfo_DeviceResourceRanges_Pool) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0}
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Unmarshal(m, b)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Merge(m, src)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Size(m)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool proto.InternalMessageInfo
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetType() DeviceInfo_DeviceResourceRanges_Pool_PoolType {
+	if m != nil {
+		return m.Type
+	}
+	return DeviceInfo_DeviceResourceRanges_Pool_ONU_ID
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetSharing() DeviceInfo_DeviceResourceRanges_Pool_SharingType {
+	if m != nil {
+		return m.Sharing
+	}
+	return DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetStart() uint32 {
+	if m != nil {
+		return m.Start
+	}
+	return 0
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetEnd() uint32 {
+	if m != nil {
+		return m.End
+	}
+	return 0
+}
+
+type Classifier struct {
+	OTpid                uint32   `protobuf:"fixed32,1,opt,name=o_tpid,json=oTpid,proto3" json:"o_tpid,omitempty"`
+	OVid                 uint32   `protobuf:"fixed32,2,opt,name=o_vid,json=oVid,proto3" json:"o_vid,omitempty"`
+	ITpid                uint32   `protobuf:"fixed32,3,opt,name=i_tpid,json=iTpid,proto3" json:"i_tpid,omitempty"`
+	IVid                 uint32   `protobuf:"fixed32,4,opt,name=i_vid,json=iVid,proto3" json:"i_vid,omitempty"`
+	OPbits               uint32   `protobuf:"fixed32,5,opt,name=o_pbits,json=oPbits,proto3" json:"o_pbits,omitempty"`
+	IPbits               uint32   `protobuf:"fixed32,6,opt,name=i_pbits,json=iPbits,proto3" json:"i_pbits,omitempty"`
+	EthType              uint32   `protobuf:"fixed32,7,opt,name=eth_type,json=ethType,proto3" json:"eth_type,omitempty"`
+	DstMac               []byte   `protobuf:"bytes,8,opt,name=dst_mac,json=dstMac,proto3" json:"dst_mac,omitempty"`
+	SrcMac               []byte   `protobuf:"bytes,9,opt,name=src_mac,json=srcMac,proto3" json:"src_mac,omitempty"`
+	IpProto              uint32   `protobuf:"fixed32,10,opt,name=ip_proto,json=ipProto,proto3" json:"ip_proto,omitempty"`
+	DstIp                uint32   `protobuf:"fixed32,11,opt,name=dst_ip,json=dstIp,proto3" json:"dst_ip,omitempty"`
+	SrcIp                uint32   `protobuf:"fixed32,12,opt,name=src_ip,json=srcIp,proto3" json:"src_ip,omitempty"`
+	SrcPort              uint32   `protobuf:"fixed32,13,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"`
+	DstPort              uint32   `protobuf:"fixed32,14,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"`
+	PktTagType           string   `protobuf:"bytes,15,opt,name=pkt_tag_type,json=pktTagType,proto3" json:"pkt_tag_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Classifier) Reset()         { *m = Classifier{} }
+func (m *Classifier) String() string { return proto.CompactTextString(m) }
+func (*Classifier) ProtoMessage()    {}
+func (*Classifier) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{16}
+}
+
+func (m *Classifier) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Classifier.Unmarshal(m, b)
+}
+func (m *Classifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Classifier.Marshal(b, m, deterministic)
+}
+func (m *Classifier) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Classifier.Merge(m, src)
+}
+func (m *Classifier) XXX_Size() int {
+	return xxx_messageInfo_Classifier.Size(m)
+}
+func (m *Classifier) XXX_DiscardUnknown() {
+	xxx_messageInfo_Classifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Classifier proto.InternalMessageInfo
+
+func (m *Classifier) GetOTpid() uint32 {
+	if m != nil {
+		return m.OTpid
+	}
+	return 0
+}
+
+func (m *Classifier) GetOVid() uint32 {
+	if m != nil {
+		return m.OVid
+	}
+	return 0
+}
+
+func (m *Classifier) GetITpid() uint32 {
+	if m != nil {
+		return m.ITpid
+	}
+	return 0
+}
+
+func (m *Classifier) GetIVid() uint32 {
+	if m != nil {
+		return m.IVid
+	}
+	return 0
+}
+
+func (m *Classifier) GetOPbits() uint32 {
+	if m != nil {
+		return m.OPbits
+	}
+	return 0
+}
+
+func (m *Classifier) GetIPbits() uint32 {
+	if m != nil {
+		return m.IPbits
+	}
+	return 0
+}
+
+func (m *Classifier) GetEthType() uint32 {
+	if m != nil {
+		return m.EthType
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstMac() []byte {
+	if m != nil {
+		return m.DstMac
+	}
+	return nil
+}
+
+func (m *Classifier) GetSrcMac() []byte {
+	if m != nil {
+		return m.SrcMac
+	}
+	return nil
+}
+
+func (m *Classifier) GetIpProto() uint32 {
+	if m != nil {
+		return m.IpProto
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstIp() uint32 {
+	if m != nil {
+		return m.DstIp
+	}
+	return 0
+}
+
+func (m *Classifier) GetSrcIp() uint32 {
+	if m != nil {
+		return m.SrcIp
+	}
+	return 0
+}
+
+func (m *Classifier) GetSrcPort() uint32 {
+	if m != nil {
+		return m.SrcPort
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstPort() uint32 {
+	if m != nil {
+		return m.DstPort
+	}
+	return 0
+}
+
+func (m *Classifier) GetPktTagType() string {
+	if m != nil {
+		return m.PktTagType
+	}
+	return ""
+}
+
+type ActionCmd struct {
+	AddOuterTag          bool     `protobuf:"varint,1,opt,name=add_outer_tag,json=addOuterTag,proto3" json:"add_outer_tag,omitempty"`
+	RemoveOuterTag       bool     `protobuf:"varint,2,opt,name=remove_outer_tag,json=removeOuterTag,proto3" json:"remove_outer_tag,omitempty"`
+	TrapToHost           bool     `protobuf:"varint,3,opt,name=trap_to_host,json=trapToHost,proto3" json:"trap_to_host,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ActionCmd) Reset()         { *m = ActionCmd{} }
+func (m *ActionCmd) String() string { return proto.CompactTextString(m) }
+func (*ActionCmd) ProtoMessage()    {}
+func (*ActionCmd) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{17}
+}
+
+func (m *ActionCmd) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ActionCmd.Unmarshal(m, b)
+}
+func (m *ActionCmd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ActionCmd.Marshal(b, m, deterministic)
+}
+func (m *ActionCmd) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ActionCmd.Merge(m, src)
+}
+func (m *ActionCmd) XXX_Size() int {
+	return xxx_messageInfo_ActionCmd.Size(m)
+}
+func (m *ActionCmd) XXX_DiscardUnknown() {
+	xxx_messageInfo_ActionCmd.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ActionCmd proto.InternalMessageInfo
+
+func (m *ActionCmd) GetAddOuterTag() bool {
+	if m != nil {
+		return m.AddOuterTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetRemoveOuterTag() bool {
+	if m != nil {
+		return m.RemoveOuterTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetTrapToHost() bool {
+	if m != nil {
+		return m.TrapToHost
+	}
+	return false
+}
+
+type Action struct {
+	Cmd                  *ActionCmd `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"`
+	OVid                 uint32     `protobuf:"fixed32,2,opt,name=o_vid,json=oVid,proto3" json:"o_vid,omitempty"`
+	OPbits               uint32     `protobuf:"fixed32,3,opt,name=o_pbits,json=oPbits,proto3" json:"o_pbits,omitempty"`
+	OTpid                uint32     `protobuf:"fixed32,4,opt,name=o_tpid,json=oTpid,proto3" json:"o_tpid,omitempty"`
+	IVid                 uint32     `protobuf:"fixed32,5,opt,name=i_vid,json=iVid,proto3" json:"i_vid,omitempty"`
+	IPbits               uint32     `protobuf:"fixed32,6,opt,name=i_pbits,json=iPbits,proto3" json:"i_pbits,omitempty"`
+	ITpid                uint32     `protobuf:"fixed32,7,opt,name=i_tpid,json=iTpid,proto3" json:"i_tpid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *Action) Reset()         { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage()    {}
+func (*Action) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{18}
+}
+
+func (m *Action) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Action.Unmarshal(m, b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Action.Marshal(b, m, deterministic)
+}
+func (m *Action) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Action.Merge(m, src)
+}
+func (m *Action) XXX_Size() int {
+	return xxx_messageInfo_Action.Size(m)
+}
+func (m *Action) XXX_DiscardUnknown() {
+	xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
+
+func (m *Action) GetCmd() *ActionCmd {
+	if m != nil {
+		return m.Cmd
+	}
+	return nil
+}
+
+func (m *Action) GetOVid() uint32 {
+	if m != nil {
+		return m.OVid
+	}
+	return 0
+}
+
+func (m *Action) GetOPbits() uint32 {
+	if m != nil {
+		return m.OPbits
+	}
+	return 0
+}
+
+func (m *Action) GetOTpid() uint32 {
+	if m != nil {
+		return m.OTpid
+	}
+	return 0
+}
+
+func (m *Action) GetIVid() uint32 {
+	if m != nil {
+		return m.IVid
+	}
+	return 0
+}
+
+func (m *Action) GetIPbits() uint32 {
+	if m != nil {
+		return m.IPbits
+	}
+	return 0
+}
+
+func (m *Action) GetITpid() uint32 {
+	if m != nil {
+		return m.ITpid
+	}
+	return 0
+}
+
+type Flow struct {
+	AccessIntfId         int32       `protobuf:"fixed32,1,opt,name=access_intf_id,json=accessIntfId,proto3" json:"access_intf_id,omitempty"`
+	OnuId                int32       `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                int32       `protobuf:"fixed32,11,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	FlowId               uint32      `protobuf:"fixed32,3,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	FlowType             string      `protobuf:"bytes,4,opt,name=flow_type,json=flowType,proto3" json:"flow_type,omitempty"`
+	AllocId              int32       `protobuf:"fixed32,10,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	NetworkIntfId        int32       `protobuf:"fixed32,5,opt,name=network_intf_id,json=networkIntfId,proto3" json:"network_intf_id,omitempty"`
+	GemportId            int32       `protobuf:"fixed32,6,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	Classifier           *Classifier `protobuf:"bytes,7,opt,name=classifier,proto3" json:"classifier,omitempty"`
+	Action               *Action     `protobuf:"bytes,8,opt,name=action,proto3" json:"action,omitempty"`
+	Priority             int32       `protobuf:"fixed32,9,opt,name=priority,proto3" json:"priority,omitempty"`
+	Cookie               uint64      `protobuf:"fixed64,12,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	PortNo               uint32      `protobuf:"fixed32,13,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Flow) Reset()         { *m = Flow{} }
+func (m *Flow) String() string { return proto.CompactTextString(m) }
+func (*Flow) ProtoMessage()    {}
+func (*Flow) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{19}
+}
+
+func (m *Flow) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Flow.Unmarshal(m, b)
+}
+func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Flow.Marshal(b, m, deterministic)
+}
+func (m *Flow) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Flow.Merge(m, src)
+}
+func (m *Flow) XXX_Size() int {
+	return xxx_messageInfo_Flow.Size(m)
+}
+func (m *Flow) XXX_DiscardUnknown() {
+	xxx_messageInfo_Flow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Flow proto.InternalMessageInfo
+
+func (m *Flow) GetAccessIntfId() int32 {
+	if m != nil {
+		return m.AccessIntfId
+	}
+	return 0
+}
+
+func (m *Flow) GetOnuId() int32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Flow) GetUniId() int32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *Flow) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *Flow) GetFlowType() string {
+	if m != nil {
+		return m.FlowType
+	}
+	return ""
+}
+
+func (m *Flow) GetAllocId() int32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *Flow) GetNetworkIntfId() int32 {
+	if m != nil {
+		return m.NetworkIntfId
+	}
+	return 0
+}
+
+func (m *Flow) GetGemportId() int32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *Flow) GetClassifier() *Classifier {
+	if m != nil {
+		return m.Classifier
+	}
+	return nil
+}
+
+func (m *Flow) GetAction() *Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+func (m *Flow) GetPriority() int32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *Flow) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *Flow) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+type SerialNumber struct {
+	VendorId             []byte   `protobuf:"bytes,1,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	VendorSpecific       []byte   `protobuf:"bytes,2,opt,name=vendor_specific,json=vendorSpecific,proto3" json:"vendor_specific,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SerialNumber) Reset()         { *m = SerialNumber{} }
+func (m *SerialNumber) String() string { return proto.CompactTextString(m) }
+func (*SerialNumber) ProtoMessage()    {}
+func (*SerialNumber) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{20}
+}
+
+func (m *SerialNumber) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SerialNumber.Unmarshal(m, b)
+}
+func (m *SerialNumber) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SerialNumber.Marshal(b, m, deterministic)
+}
+func (m *SerialNumber) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SerialNumber.Merge(m, src)
+}
+func (m *SerialNumber) XXX_Size() int {
+	return xxx_messageInfo_SerialNumber.Size(m)
+}
+func (m *SerialNumber) XXX_DiscardUnknown() {
+	xxx_messageInfo_SerialNumber.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SerialNumber proto.InternalMessageInfo
+
+func (m *SerialNumber) GetVendorId() []byte {
+	if m != nil {
+		return m.VendorId
+	}
+	return nil
+}
+
+func (m *SerialNumber) GetVendorSpecific() []byte {
+	if m != nil {
+		return m.VendorSpecific
+	}
+	return nil
+}
+
+type PortStatistics struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	RxBytes              uint64   `protobuf:"fixed64,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxPackets            uint64   `protobuf:"fixed64,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	RxUcastPackets       uint64   `protobuf:"fixed64,4,opt,name=rx_ucast_packets,json=rxUcastPackets,proto3" json:"rx_ucast_packets,omitempty"`
+	RxMcastPackets       uint64   `protobuf:"fixed64,5,opt,name=rx_mcast_packets,json=rxMcastPackets,proto3" json:"rx_mcast_packets,omitempty"`
+	RxBcastPackets       uint64   `protobuf:"fixed64,6,opt,name=rx_bcast_packets,json=rxBcastPackets,proto3" json:"rx_bcast_packets,omitempty"`
+	RxErrorPackets       uint64   `protobuf:"fixed64,7,opt,name=rx_error_packets,json=rxErrorPackets,proto3" json:"rx_error_packets,omitempty"`
+	TxBytes              uint64   `protobuf:"fixed64,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"fixed64,9,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxUcastPackets       uint64   `protobuf:"fixed64,10,opt,name=tx_ucast_packets,json=txUcastPackets,proto3" json:"tx_ucast_packets,omitempty"`
+	TxMcastPackets       uint64   `protobuf:"fixed64,11,opt,name=tx_mcast_packets,json=txMcastPackets,proto3" json:"tx_mcast_packets,omitempty"`
+	TxBcastPackets       uint64   `protobuf:"fixed64,12,opt,name=tx_bcast_packets,json=txBcastPackets,proto3" json:"tx_bcast_packets,omitempty"`
+	TxErrorPackets       uint64   `protobuf:"fixed64,13,opt,name=tx_error_packets,json=txErrorPackets,proto3" json:"tx_error_packets,omitempty"`
+	RxCrcErrors          uint64   `protobuf:"fixed64,14,opt,name=rx_crc_errors,json=rxCrcErrors,proto3" json:"rx_crc_errors,omitempty"`
+	BipErrors            uint64   `protobuf:"fixed64,15,opt,name=bip_errors,json=bipErrors,proto3" json:"bip_errors,omitempty"`
+	Timestamp            uint32   `protobuf:"fixed32,16,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PortStatistics) Reset()         { *m = PortStatistics{} }
+func (m *PortStatistics) String() string { return proto.CompactTextString(m) }
+func (*PortStatistics) ProtoMessage()    {}
+func (*PortStatistics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{21}
+}
+
+func (m *PortStatistics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PortStatistics.Unmarshal(m, b)
+}
+func (m *PortStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PortStatistics.Marshal(b, m, deterministic)
+}
+func (m *PortStatistics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortStatistics.Merge(m, src)
+}
+func (m *PortStatistics) XXX_Size() int {
+	return xxx_messageInfo_PortStatistics.Size(m)
+}
+func (m *PortStatistics) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortStatistics proto.InternalMessageInfo
+
+func (m *PortStatistics) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxUcastPackets() uint64 {
+	if m != nil {
+		return m.RxUcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxMcastPackets() uint64 {
+	if m != nil {
+		return m.RxMcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxBcastPackets() uint64 {
+	if m != nil {
+		return m.RxBcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxErrorPackets() uint64 {
+	if m != nil {
+		return m.RxErrorPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxUcastPackets() uint64 {
+	if m != nil {
+		return m.TxUcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxMcastPackets() uint64 {
+	if m != nil {
+		return m.TxMcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxBcastPackets() uint64 {
+	if m != nil {
+		return m.TxBcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxErrorPackets() uint64 {
+	if m != nil {
+		return m.TxErrorPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxCrcErrors() uint64 {
+	if m != nil {
+		return m.RxCrcErrors
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetBipErrors() uint64 {
+	if m != nil {
+		return m.BipErrors
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTimestamp() uint32 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+type FlowStatistics struct {
+	FlowId               uint32   `protobuf:"fixed32,1,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	RxBytes              uint64   `protobuf:"fixed64,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxPackets            uint64   `protobuf:"fixed64,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	TxBytes              uint64   `protobuf:"fixed64,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"fixed64,9,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	Timestamp            uint32   `protobuf:"fixed32,16,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FlowStatistics) Reset()         { *m = FlowStatistics{} }
+func (m *FlowStatistics) String() string { return proto.CompactTextString(m) }
+func (*FlowStatistics) ProtoMessage()    {}
+func (*FlowStatistics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{22}
+}
+
+func (m *FlowStatistics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowStatistics.Unmarshal(m, b)
+}
+func (m *FlowStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowStatistics.Marshal(b, m, deterministic)
+}
+func (m *FlowStatistics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowStatistics.Merge(m, src)
+}
+func (m *FlowStatistics) XXX_Size() int {
+	return xxx_messageInfo_FlowStatistics.Size(m)
+}
+func (m *FlowStatistics) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowStatistics proto.InternalMessageInfo
+
+func (m *FlowStatistics) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTimestamp() uint32 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+type LosIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	Status               string   `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LosIndication) Reset()         { *m = LosIndication{} }
+func (m *LosIndication) String() string { return proto.CompactTextString(m) }
+func (*LosIndication) ProtoMessage()    {}
+func (*LosIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{23}
+}
+
+func (m *LosIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LosIndication.Unmarshal(m, b)
+}
+func (m *LosIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LosIndication.Marshal(b, m, deterministic)
+}
+func (m *LosIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LosIndication.Merge(m, src)
+}
+func (m *LosIndication) XXX_Size() int {
+	return xxx_messageInfo_LosIndication.Size(m)
+}
+func (m *LosIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_LosIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LosIndication proto.InternalMessageInfo
+
+func (m *LosIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *LosIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type DyingGaspIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DyingGaspIndication) Reset()         { *m = DyingGaspIndication{} }
+func (m *DyingGaspIndication) String() string { return proto.CompactTextString(m) }
+func (*DyingGaspIndication) ProtoMessage()    {}
+func (*DyingGaspIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{24}
+}
+
+func (m *DyingGaspIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DyingGaspIndication.Unmarshal(m, b)
+}
+func (m *DyingGaspIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DyingGaspIndication.Marshal(b, m, deterministic)
+}
+func (m *DyingGaspIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DyingGaspIndication.Merge(m, src)
+}
+func (m *DyingGaspIndication) XXX_Size() int {
+	return xxx_messageInfo_DyingGaspIndication.Size(m)
+}
+func (m *DyingGaspIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_DyingGaspIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DyingGaspIndication proto.InternalMessageInfo
+
+func (m *DyingGaspIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *DyingGaspIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *DyingGaspIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuAlarmIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	LosStatus            string   `protobuf:"bytes,3,opt,name=los_status,json=losStatus,proto3" json:"los_status,omitempty"`
+	LobStatus            string   `protobuf:"bytes,4,opt,name=lob_status,json=lobStatus,proto3" json:"lob_status,omitempty"`
+	LopcMissStatus       string   `protobuf:"bytes,5,opt,name=lopc_miss_status,json=lopcMissStatus,proto3" json:"lopc_miss_status,omitempty"`
+	LopcMicErrorStatus   string   `protobuf:"bytes,6,opt,name=lopc_mic_error_status,json=lopcMicErrorStatus,proto3" json:"lopc_mic_error_status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuAlarmIndication) Reset()         { *m = OnuAlarmIndication{} }
+func (m *OnuAlarmIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuAlarmIndication) ProtoMessage()    {}
+func (*OnuAlarmIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{25}
+}
+
+func (m *OnuAlarmIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuAlarmIndication.Unmarshal(m, b)
+}
+func (m *OnuAlarmIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuAlarmIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuAlarmIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuAlarmIndication.Merge(m, src)
+}
+func (m *OnuAlarmIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuAlarmIndication.Size(m)
+}
+func (m *OnuAlarmIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuAlarmIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuAlarmIndication proto.InternalMessageInfo
+
+func (m *OnuAlarmIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuAlarmIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuAlarmIndication) GetLosStatus() string {
+	if m != nil {
+		return m.LosStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLobStatus() string {
+	if m != nil {
+		return m.LobStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLopcMissStatus() string {
+	if m != nil {
+		return m.LopcMissStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLopcMicErrorStatus() string {
+	if m != nil {
+		return m.LopcMicErrorStatus
+	}
+	return ""
+}
+
+type OnuStartupFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuStartupFailureIndication) Reset()         { *m = OnuStartupFailureIndication{} }
+func (m *OnuStartupFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuStartupFailureIndication) ProtoMessage()    {}
+func (*OnuStartupFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{26}
+}
+
+func (m *OnuStartupFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuStartupFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuStartupFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuStartupFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuStartupFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuStartupFailureIndication.Merge(m, src)
+}
+func (m *OnuStartupFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuStartupFailureIndication.Size(m)
+}
+func (m *OnuStartupFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuStartupFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuStartupFailureIndication proto.InternalMessageInfo
+
+func (m *OnuStartupFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuStartupFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuStartupFailureIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuSignalDegradeIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	InverseBitErrorRate  uint32   `protobuf:"fixed32,4,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuSignalDegradeIndication) Reset()         { *m = OnuSignalDegradeIndication{} }
+func (m *OnuSignalDegradeIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuSignalDegradeIndication) ProtoMessage()    {}
+func (*OnuSignalDegradeIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{27}
+}
+
+func (m *OnuSignalDegradeIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Unmarshal(m, b)
+}
+func (m *OnuSignalDegradeIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuSignalDegradeIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuSignalDegradeIndication.Merge(m, src)
+}
+func (m *OnuSignalDegradeIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Size(m)
+}
+func (m *OnuSignalDegradeIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuSignalDegradeIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuSignalDegradeIndication proto.InternalMessageInfo
+
+func (m *OnuSignalDegradeIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuSignalDegradeIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuSignalDegradeIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuSignalDegradeIndication) GetInverseBitErrorRate() uint32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+type OnuDriftOfWindowIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	Drift                uint32   `protobuf:"fixed32,4,opt,name=drift,proto3" json:"drift,omitempty"`
+	NewEqd               uint32   `protobuf:"fixed32,5,opt,name=new_eqd,json=newEqd,proto3" json:"new_eqd,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuDriftOfWindowIndication) Reset()         { *m = OnuDriftOfWindowIndication{} }
+func (m *OnuDriftOfWindowIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuDriftOfWindowIndication) ProtoMessage()    {}
+func (*OnuDriftOfWindowIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{28}
+}
+
+func (m *OnuDriftOfWindowIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Unmarshal(m, b)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuDriftOfWindowIndication.Merge(m, src)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Size(m)
+}
+func (m *OnuDriftOfWindowIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuDriftOfWindowIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuDriftOfWindowIndication proto.InternalMessageInfo
+
+func (m *OnuDriftOfWindowIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuDriftOfWindowIndication) GetDrift() uint32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetNewEqd() uint32 {
+	if m != nil {
+		return m.NewEqd
+	}
+	return 0
+}
+
+type OnuLossOfOmciChannelIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuLossOfOmciChannelIndication) Reset()         { *m = OnuLossOfOmciChannelIndication{} }
+func (m *OnuLossOfOmciChannelIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuLossOfOmciChannelIndication) ProtoMessage()    {}
+func (*OnuLossOfOmciChannelIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{29}
+}
+
+func (m *OnuLossOfOmciChannelIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Unmarshal(m, b)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuLossOfOmciChannelIndication.Merge(m, src)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Size(m)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuLossOfOmciChannelIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuLossOfOmciChannelIndication proto.InternalMessageInfo
+
+func (m *OnuLossOfOmciChannelIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuLossOfOmciChannelIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuLossOfOmciChannelIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuSignalsFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	InverseBitErrorRate  uint32   `protobuf:"fixed32,4,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuSignalsFailureIndication) Reset()         { *m = OnuSignalsFailureIndication{} }
+func (m *OnuSignalsFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuSignalsFailureIndication) ProtoMessage()    {}
+func (*OnuSignalsFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{30}
+}
+
+func (m *OnuSignalsFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuSignalsFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuSignalsFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuSignalsFailureIndication.Merge(m, src)
+}
+func (m *OnuSignalsFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Size(m)
+}
+func (m *OnuSignalsFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuSignalsFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuSignalsFailureIndication proto.InternalMessageInfo
+
+func (m *OnuSignalsFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuSignalsFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuSignalsFailureIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuSignalsFailureIndication) GetInverseBitErrorRate() uint32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+type OnuTransmissionInterferenceWarning struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	Drift                uint32   `protobuf:"fixed32,4,opt,name=drift,proto3" json:"drift,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuTransmissionInterferenceWarning) Reset()         { *m = OnuTransmissionInterferenceWarning{} }
+func (m *OnuTransmissionInterferenceWarning) String() string { return proto.CompactTextString(m) }
+func (*OnuTransmissionInterferenceWarning) ProtoMessage()    {}
+func (*OnuTransmissionInterferenceWarning) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{31}
+}
+
+func (m *OnuTransmissionInterferenceWarning) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Unmarshal(m, b)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Marshal(b, m, deterministic)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuTransmissionInterferenceWarning.Merge(m, src)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Size() int {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Size(m)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuTransmissionInterferenceWarning.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuTransmissionInterferenceWarning proto.InternalMessageInfo
+
+func (m *OnuTransmissionInterferenceWarning) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetDrift() uint32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+type OnuActivationFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuActivationFailureIndication) Reset()         { *m = OnuActivationFailureIndication{} }
+func (m *OnuActivationFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuActivationFailureIndication) ProtoMessage()    {}
+func (*OnuActivationFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{32}
+}
+
+func (m *OnuActivationFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuActivationFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuActivationFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuActivationFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuActivationFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuActivationFailureIndication.Merge(m, src)
+}
+func (m *OnuActivationFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuActivationFailureIndication.Size(m)
+}
+func (m *OnuActivationFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuActivationFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuActivationFailureIndication proto.InternalMessageInfo
+
+func (m *OnuActivationFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuActivationFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+type OnuProcessingErrorIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuProcessingErrorIndication) Reset()         { *m = OnuProcessingErrorIndication{} }
+func (m *OnuProcessingErrorIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuProcessingErrorIndication) ProtoMessage()    {}
+func (*OnuProcessingErrorIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{33}
+}
+
+func (m *OnuProcessingErrorIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Unmarshal(m, b)
+}
+func (m *OnuProcessingErrorIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuProcessingErrorIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuProcessingErrorIndication.Merge(m, src)
+}
+func (m *OnuProcessingErrorIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Size(m)
+}
+func (m *OnuProcessingErrorIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuProcessingErrorIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuProcessingErrorIndication proto.InternalMessageInfo
+
+func (m *OnuProcessingErrorIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuProcessingErrorIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{34}
+}
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("openolt.DeviceInfo_DeviceResourceRanges_Pool_PoolType", DeviceInfo_DeviceResourceRanges_Pool_PoolType_name, DeviceInfo_DeviceResourceRanges_Pool_PoolType_value)
+	proto.RegisterEnum("openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType", DeviceInfo_DeviceResourceRanges_Pool_SharingType_name, DeviceInfo_DeviceResourceRanges_Pool_SharingType_value)
+	proto.RegisterType((*Indication)(nil), "openolt.Indication")
+	proto.RegisterType((*AlarmIndication)(nil), "openolt.AlarmIndication")
+	proto.RegisterType((*OltIndication)(nil), "openolt.OltIndication")
+	proto.RegisterType((*IntfIndication)(nil), "openolt.IntfIndication")
+	proto.RegisterType((*OnuDiscIndication)(nil), "openolt.OnuDiscIndication")
+	proto.RegisterType((*OnuIndication)(nil), "openolt.OnuIndication")
+	proto.RegisterType((*IntfOperIndication)(nil), "openolt.IntfOperIndication")
+	proto.RegisterType((*OmciIndication)(nil), "openolt.OmciIndication")
+	proto.RegisterType((*PacketIndication)(nil), "openolt.PacketIndication")
+	proto.RegisterType((*Interface)(nil), "openolt.Interface")
+	proto.RegisterType((*Heartbeat)(nil), "openolt.Heartbeat")
+	proto.RegisterType((*Onu)(nil), "openolt.Onu")
+	proto.RegisterType((*OmciMsg)(nil), "openolt.OmciMsg")
+	proto.RegisterType((*OnuPacket)(nil), "openolt.OnuPacket")
+	proto.RegisterType((*UplinkPacket)(nil), "openolt.UplinkPacket")
+	proto.RegisterType((*DeviceInfo)(nil), "openolt.DeviceInfo")
+	proto.RegisterType((*DeviceInfo_DeviceResourceRanges)(nil), "openolt.DeviceInfo.DeviceResourceRanges")
+	proto.RegisterType((*DeviceInfo_DeviceResourceRanges_Pool)(nil), "openolt.DeviceInfo.DeviceResourceRanges.Pool")
+	proto.RegisterType((*Classifier)(nil), "openolt.Classifier")
+	proto.RegisterType((*ActionCmd)(nil), "openolt.ActionCmd")
+	proto.RegisterType((*Action)(nil), "openolt.Action")
+	proto.RegisterType((*Flow)(nil), "openolt.Flow")
+	proto.RegisterType((*SerialNumber)(nil), "openolt.SerialNumber")
+	proto.RegisterType((*PortStatistics)(nil), "openolt.PortStatistics")
+	proto.RegisterType((*FlowStatistics)(nil), "openolt.FlowStatistics")
+	proto.RegisterType((*LosIndication)(nil), "openolt.LosIndication")
+	proto.RegisterType((*DyingGaspIndication)(nil), "openolt.DyingGaspIndication")
+	proto.RegisterType((*OnuAlarmIndication)(nil), "openolt.OnuAlarmIndication")
+	proto.RegisterType((*OnuStartupFailureIndication)(nil), "openolt.OnuStartupFailureIndication")
+	proto.RegisterType((*OnuSignalDegradeIndication)(nil), "openolt.OnuSignalDegradeIndication")
+	proto.RegisterType((*OnuDriftOfWindowIndication)(nil), "openolt.OnuDriftOfWindowIndication")
+	proto.RegisterType((*OnuLossOfOmciChannelIndication)(nil), "openolt.OnuLossOfOmciChannelIndication")
+	proto.RegisterType((*OnuSignalsFailureIndication)(nil), "openolt.OnuSignalsFailureIndication")
+	proto.RegisterType((*OnuTransmissionInterferenceWarning)(nil), "openolt.OnuTransmissionInterferenceWarning")
+	proto.RegisterType((*OnuActivationFailureIndication)(nil), "openolt.OnuActivationFailureIndication")
+	proto.RegisterType((*OnuProcessingErrorIndication)(nil), "openolt.OnuProcessingErrorIndication")
+	proto.RegisterType((*Empty)(nil), "openolt.Empty")
+}
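(Editorial aside, not part of the generated file.) Once the enums and message types are registered here, these structs behave like any other golang/protobuf message. A hedged round-trip sketch, written as if inside this package and relying only on the proto import the file already uses:

	func exampleRoundTrip() (*LosIndication, error) {
		in := &LosIndication{IntfId: 1, Status: "on"} // field values are illustrative only
		b, err := proto.Marshal(in)                   // wire-format encoding via github.com/golang/protobuf/proto
		if err != nil {
			return nil, err
		}
		out := &LosIndication{}
		if err := proto.Unmarshal(b, out); err != nil {
			return nil, err
		}
		return out, nil
	}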
+
+func init() { proto.RegisterFile("voltha_protos/openolt.proto", fileDescriptor_c072e7aa0dfd74d5) }
+
+var fileDescriptor_c072e7aa0dfd74d5 = []byte{
+	// 2990 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdd, 0x72, 0x1c, 0x47,
+	0xf5, 0xf7, 0x4a, 0xab, 0x9d, 0xdd, 0xb3, 0x1f, 0x92, 0x5b, 0xd6, 0x87, 0x25, 0xc5, 0x51, 0xcd,
+	0xdf, 0xff, 0xc4, 0x40, 0xc5, 0x8a, 0x9d, 0x14, 0x90, 0x40, 0x41, 0x64, 0x49, 0x8e, 0x16, 0x2c,
+	0xaf, 0x18, 0xc9, 0x31, 0x84, 0xa2, 0x26, 0xa3, 0x99, 0xde, 0xdd, 0x2e, 0xcd, 0x4e, 0x4f, 0xa6,
+	0x7b, 0x25, 0xb9, 0xb8, 0x0b, 0xf0, 0x02, 0xa4, 0xb8, 0x80, 0x2a, 0x9e, 0x80, 0x17, 0xa0, 0x8a,
+	0xcb, 0xdc, 0x52, 0xdc, 0xf0, 0x00, 0xdc, 0xf0, 0x18, 0x5c, 0x50, 0x7d, 0xba, 0x67, 0x76, 0x66,
+	0x77, 0x25, 0x5b, 0xc1, 0x14, 0x37, 0xaa, 0xe9, 0x73, 0x7e, 0xe7, 0xd7, 0x7d, 0xba, 0xcf, 0x39,
+	0xfd, 0xa1, 0x85, 0xf5, 0x33, 0x1e, 0xca, 0xbe, 0xe7, 0xc6, 0x09, 0x97, 0x5c, 0x6c, 0xf1, 0x98,
+	0x46, 0x3c, 0x94, 0xf7, 0xb1, 0x49, 0x2c, 0xd3, 0x5c, 0xdb, 0xe8, 0x71, 0xde, 0x0b, 0xe9, 0x96,
+	0x17, 0xb3, 0x2d, 0x2f, 0x8a, 0xb8, 0xf4, 0x24, 0xe3, 0x91, 0xd0, 0xb0, 0xb5, 0xcd, 0x22, 0x87,
+	0xa4, 0x7e, 0x5f, 0x7d, 0x77, 0x59, 0x48, 0x35, 0xc2, 0xfe, 0x6b, 0x19, 0xa0, 0x1d, 0x05, 0xcc,
+	0x47, 0x3b, 0xf2, 0x00, 0x2c, 0x1e, 0x4a, 0x97, 0x45, 0xc1, 0x6a, 0x69, 0xb3, 0x74, 0xaf, 0xfe,
+	0x70, 0xf9, 0x7e, 0xda, 0x71, 0x27, 0x94, 0x23, 0xe0, 0xfe, 0x0d, 0xa7, 0xc2, 0x51, 0x40, 0xde,
+	0x87, 0x2a, 0x8b, 0x64, 0x17, 0x6d, 0x66, 0xd0, 0x66, 0x25, 0xb3, 0x69, 0x47, 0xb2, 0x5b, 0x30,
+	0xb2, 0x98, 0x96, 0x90, 0x6d, 0x68, 0xa2, 0x15, 0x8f, 0x69, 0x82, 0xa6, 0xb3, 0x68, 0xba, 0x5e,
+	0x30, 0xed, 0xc4, 0x34, 0x29, 0x98, 0xd7, 0xd9, 0x48, 0x4a, 0x7e, 0x00, 0x0d, 0x1e, 0x0d, 0xdd,
+	0x80, 0x09, 0x1f, 0x19, 0xca, 0xc8, 0xb0, 0x36, 0x1a, 0x70, 0x34, 0xdc, 0x65, 0xc2, 0x2f, 0x10,
+	0x00, 0xcf, 0x84, 0xe8, 0x6b, 0x34, 0x44, 0xd3, 0xb9, 0x71, 0x5f, 0xa3, 0xe1, 0x98, 0xaf, 0x28,
+	0x50, 0xbe, 0xf2, 0x81, 0xcf, 0xd0, 0xa6, 0x32, 0xe6, 0x6b, 0x67, 0xe0, 0xb3, 0xa2, 0xaf, 0x5c,
+	0x4b, 0xc8, 0xfb, 0x60, 0xc5, 0xa7, 0x7a, 0x52, 0x2d, 0x34, 0xba, 0x9d, 0x19, 0x1d, 0x7a, 0xfe,
+	0x29, 0x1d, 0x9b, 0xd7, 0xf8, 0x14, 0xe7, 0xf5, 0xbb, 0x00, 0x31, 0x4f, 0xa4, 0x2b, 0xa4, 0x27,
+	0xc5, 0x6a, 0x75, 0xac, 0xb7, 0x43, 0x9e, 0xc8, 0x23, 0xb5, 0xd8, 0x42, 0x32, 0x5f, 0xec, 0xdf,
+	0x70, 0x6a, 0xb1, 0x91, 0x08, 0x65, 0xd9, 0x0d, 0xf9, 0xb9, 0xb1, 0xac, 0x8d, 0x59, 0x3e, 0x0e,
+	0xf9, 0x79, 0xd1, 0xb2, 0x6b, 0x24, 0x82, 0x7c, 0x07, 0x6a, 0x5e, 0xe8, 0x25, 0x03, 0x1c, 0x2b,
+	0xa0, 0xe1, 0x6a, 0x66, 0xb8, 0xad, 0x34, 0x85, 0xa1, 0x56, 0x3d, 0x23, 0x7a, 0x54, 0x81, 0x72,
+	0xe0, 0x49, 0xcf, 0xfe, 0x93, 0x05, 0xf3, 0x63, 0x38, 0x35, 0xcf, 0x21, 0x17, 0x53, 0x63, 0xea,
+	0x09, 0x17, 0x45, 0xdf, 0x43, 0x14, 0x90, 0x5d, 0x68, 0x05, 0x2f, 0x58, 0xd4, 0x73, 0x7b, 0x9e,
+	0x88, 0x73, 0x91, 0xb5, 0x91, 0x59, 0xee, 0x2a, 0xf5, 0xc7, 0x9e, 0x88, 0x0b, 0xf6, 0x8d, 0x20,
+	0x27, 0x56, 0x31, 0xa6, 0x16, 0x78, 0xe4, 0xd1, 0x78, 0x8c, 0x75, 0xa2, 0xe1, 0xa4, 0x53, 0x75,
+	0x3e, 0x92, 0x92, 0xe7, 0x70, 0x4b, 0x51, 0x08, 0xe9, 0x25, 0x72, 0x18, 0xbb, 0x5d, 0x8f, 0x85,
+	0xb9, 0x58, 0xbb, 0x9b, 0x67, 0x3a, 0xd2, 0x98, 0xc7, 0x1e, 0x0b, 0x87, 0x09, 0x2d, 0x50, 0xde,
+	0xe4, 0x05, 0xb5, 0x22, 0xfe, 0x14, 0x96, 0x91, 0x98, 0xf5, 0x22, 0x2f, 0x74, 0x03, 0xda, 0x4b,
+	0xbc, 0x80, 0xe6, 0x62, 0xf1, 0xff, 0x0a, 0xd4, 0x88, 0xda, 0xd5, 0xa0, 0x02, 0xf3, 0x22, 0x9f,
+	0xd4, 0x92, 0x9f, 0xc3, 0x0a, 0x26, 0x46, 0xc2, 0xba, 0xd2, 0xe5, 0x5d, 0xf7, 0x9c, 0x45, 0x01,
+	0x3f, 0xcf, 0x05, 0x6d, 0x81, 0x7c, 0x57, 0xc1, 0x3a, 0xdd, 0xe7, 0x08, 0x9a, 0x20, 0x1f, 0xd7,
+	0x92, 0x63, 0x50, 0xde, 0xb8, 0x21, 0x17, 0xc2, 0xcd, 0x72, 0x41, 0x87, 0xf5, 0xdb, 0x79, 0xda,
+	0x27, 0x5c, 0x88, 0x4e, 0x57, 0x25, 0xc5, 0x4e, 0xdf, 0x8b, 0x22, 0x1a, 0x16, 0xa8, 0x5b, 0xdc,
+	0x20, 0x4c, 0x8a, 0xa4, 0xf3, 0x8c, 0xae, 0x88, 0xd1, 0x3c, 0x57, 0xa7, 0xcc, 0xb3, 0xc6, 0x5c,
+	0x3a, 0xcf, 0x23, 0xb5, 0x22, 0xee, 0xe8, 0x22, 0x21, 0xd9, 0xb9, 0x1e, 0xa9, 0xce, 0x86, 0x6f,
+	0xe5, 0x09, 0x8f, 0x13, 0x2f, 0x12, 0x03, 0x26, 0x04, 0xe3, 0x51, 0x3b, 0x92, 0x34, 0xe9, 0xd2,
+	0x84, 0x46, 0x3e, 0x7d, 0xee, 0x25, 0x11, 0x8b, 0x7a, 0xa6, 0x6a, 0x1c, 0xb3, 0x73, 0x1c, 0xe9,
+	0x67, 0x7a, 0x72, 0x3d, 0x5f, 0xb2, 0x33, 0xec, 0x77, 0x34, 0x58, 0x98, 0x9c, 0x85, 0xed, 0x0c,
+	0x36, 0x6d, 0xbc, 0xca, 0xe7, 0x22, 0x42, 0xf7, 0xb0, 0xaa, 0x7a, 0x88, 0x13, 0xee, 0x53, 0x21,
+	0x54, 0x16, 0xd0, 0x24, 0xe1, 0xba, 0x4a, 0xd6, 0xb1, 0x8b, 0xff, 0xcf, 0x77, 0x71, 0x98, 0xe1,
+	0xf6, 0x14, 0xac, 0xd0, 0xc1, 0x12, 0x9f, 0xa6, 0xcf, 0xb2, 0xf5, 0x3e, 0x34, 0x0b, 0x55, 0x9d,
+	0xbc, 0x01, 0x80, 0x05, 0x59, 0x55, 0x0e, 0x8a, 0xd9, 0x5a, 0x73, 0x6a, 0x4a, 0xa2, 0xca, 0x03,
+	0xb5, 0xf7, 0xa1, 0x55, 0xac, 0xe8, 0x64, 0x05, 0x2c, 0x5d, 0xfc, 0x75, 0x6e, 0x5b, 0x4e, 0x05,
+	0x0b, 0x7c, 0x30, 0xc6, 0x34, 0x33, 0xce, 0xd4, 0x87, 0x9b, 0x13, 0xe5, 0xf9, 0x72, 0xb2, 0x0f,
+	0xa1, 0x29, 0x68, 0xc2, 0xbc, 0xd0, 0x8d, 0x86, 0x83, 0x13, 0x9a, 0x98, 0x6a, 0xb0, 0x94, 0x4d,
+	0xc3, 0x11, 0x6a, 0x9f, 0xa2, 0xd2, 0x69, 0x88, 0x5c, 0xcb, 0xfe, 0x4b, 0x09, 0x9a, 0x85, 0x72,
+	0x7e, 0x79, 0x37, 0x4b, 0x50, 0xc1, 0x0d, 0x41, 0x57, 0x1b, 0xcb, 0x99, 0x53, 0x55, 0x7f, 0xdc,
+	0x95, 0xd9, 0x31, 0x57, 0xc8, 0x9b, 0x50, 0xf7, 0x82, 0x01, 0x8b, 0x8c, 0x7e, 0x0e, 0xf5, 0x80,
+	0x22, 0x0d, 0x98, 0x18, 0x7d, 0xf9, 0xd5, 0x47, 0xff, 0x19, 0x90, 0xc9, 0x8d, 0x90, 0x10, 0x28,
+	0xcb, 0x17, 0x71, 0xba, 0x40, 0xf8, 0x9d, 0xf7, 0x6a, 0xe6, 0x8a, 0x95, 0x18, 0x1f, 0xbe, 0xed,
+	0x40, 0xab, 0xb8, 0x73, 0x5d, 0x7b, 0x7e, 0x16, 0x60, 0x36, 0x3e, 0x95, 0xc8, 0xdc, 0x70, 0xd4,
+	0xa7, 0xfd, 0x55, 0x09, 0x16, 0xc6, 0x77, 0x36, 0xb2, 0x0e, 0x35, 0xa4, 0xc5, 0x91, 0xeb, 0x59,
+	0xc2, 0x83, 0xc3, 0xf1, 0xd8, 0xe8, 0x27, 0xe2, 0xa8, 0x47, 0x07, 0xb8, 0x11, 0x66, 0xfd, 0xd6,
+	0x8c, 0xa4, 0x1d, 0x28, 0x3b, 0xdc, 0xea, 0x98, 0x2e, 0xee, 0x96, 0x53, 0x51, 0x4d, 0xad, 0x40,
+	0xa3, 0x88, 0x63, 0xcd, 0xb3, 0x9c, 0x8a, 0x6a, 0x3e, 0xe5, 0x64, 0x19, 0x2a, 0x3e, 0xe7, 0xa7,
+	0x8c, 0x62, 0xd1, 0xaa, 0x38, 0xa6, 0x95, 0x7a, 0x51, 0x1e, 0x79, 0x71, 0x17, 0x6a, 0xba, 0x1c,
+	0x78, 0xfe, 0xe5, 0x03, 0xb4, 0xbf, 0x0f, 0xb5, 0x7d, 0xea, 0x25, 0xf2, 0x84, 0x7a, 0x92, 0x6c,
+	0xc1, 0x62, 0x3f, 0x6d, 0xe8, 0x62, 0x26, 0x87, 0x09, 0x35, 0x16, 0x24, 0x53, 0x1d, 0xa5, 0x1a,
+	0xfb, 0x57, 0x25, 0x98, 0xed, 0x44, 0xc3, 0x6b, 0xcf, 0xf9, 0x44, 0x4c, 0xcd, 0xbe, 0x72, 0x4c,
+	0xa1, 0xa7, 0x4c, 0x47, 0xa1, 0xe5, 0xa8, 0x4f, 0xfb, 0xc7, 0x60, 0xa9, 0x18, 0x38, 0x10, 0xbd,
+	0xd7, 0xb0, 0xf8, 0xbf, 0x29, 0x41, 0x4d, 0x95, 0x25, 0x5c, 0xff, 0x6b, 0xf3, 0xe5, 0xd6, 0xad,
+	0x5c, 0x58, 0xb7, 0x62, 0x20, 0xcc, 0x8d, 0x07, 0xc2, 0xe4, 0x38, 0x3e, 0x80, 0xc6, 0xb3, 0x38,
+	0x64, 0xd1, 0xe9, 0xcb, 0x46, 0x62, 0x4c, 0x67, 0x46, 0xa6, 0xbf, 0xad, 0x01, 0xec, 0xd2, 0x33,
+	0xe6, 0xd3, 0x76, 0xd4, 0xc5, 0x90, 0x39, 0xa3, 0x51, 0xc0, 0x13, 0x93, 0x70, 0xa6, 0x45, 0x6e,
+	0xc1, 0xdc, 0x80, 0x07, 0x34, 0x34, 0xe5, 0x4d, 0x37, 0xc8, 0x37, 0x60, 0xa1, 0xef, 0x25, 0xc1,
+	0xb9, 0x97, 0x50, 0xf7, 0x8c, 0x26, 0x6a, 0x57, 0x31, 0x59, 0x37, 0x9f, 0xca, 0x3f, 0xd1, 0x62,
+	0x05, 0xed, 0xb2, 0x64, 0x50, 0x80, 0x96, 0x35, 0x34, 0x95, 0xa7, 0xd0, 0x75, 0xa8, 0x05, 0x38,
+	0x22, 0x35, 0xfe, 0x05, 0x9d, 0x3d, 0x5a, 0xd0, 0x0e, 0xc8, 0xbb, 0x70, 0xcb, 0x28, 0x8b, 0x41,
+	0x71, 0x13, 0x71, 0x44, 0xeb, 0xf2, 0x11, 0xa1, 0xe8, 0x62, 0x1e, 0xb9, 0x6a, 0xf2, 0xc4, 0x6a,
+	0x03, 0xa7, 0xa3, 0x1a, 0xf3, 0x48, 0x9d, 0x2a, 0x05, 0xb9, 0x03, 0xa0, 0x6e, 0x0a, 0x11, 0x0f,
+	0x79, 0xef, 0x45, 0x5a, 0xd0, 0x46, 0x12, 0xb2, 0xa9, 0xf7, 0x54, 0x16, 0xe8, 0x73, 0x91, 0x49,
+	0x30, 0xc0, 0x05, 0xc4, 0x63, 0x0e, 0xd9, 0x00, 0x30, 0x08, 0x6a, 0x4e, 0x07, 0x96, 0x53, 0x45,
+	0xfd, 0x5e, 0x14, 0x90, 0xbb, 0xd0, 0xf2, 0xc2, 0x90, 0xfb, 0x23, 0x86, 0x2a, 0x22, 0x1a, 0x28,
+	0x4d, 0x39, 0x36, 0xa1, 0x91, 0xa1, 0xa8, 0xd9, 0xb9, 0x2d, 0x07, 0x0c, 0x46, 0xf1, 0xdc, 0x83,
+	0x85, 0x51, 0x48, 0x18, 0x26, 0x40, 0x54, 0x2b, 0x0b, 0x0c, 0xcd, 0x75, 0x17, 0x5a, 0x39, 0x24,
+	0x35, 0x1b, 0xa9, 0xe5, 0x34, 0x32, 0x9c, 0xe2, 0xb3, 0xa1, 0x69, 0x8a, 0x89, 0x21, 0x6b, 0x22,
+	0xa8, 0xae, 0x4b, 0x8a, 0x66, 0xba, 0x03, 0xf5, 0x14, 0xa3, 0x68, 0x5a, 0x3a, 0x0e, 0x35, 0x42,
+	0x71, 0x7c, 0x04, 0x95, 0xc4, 0x8b, 0x7a, 0x54, 0xac, 0xce, 0x6f, 0xce, 0xde, 0xab, 0x3f, 0xbc,
+	0x37, 0x3a, 0xb1, 0x66, 0x01, 0x65, 0x3e, 0x1d, 0x2a, 0xf8, 0x30, 0xf1, 0xa9, 0x83, 0x78, 0xc7,
+	0xd8, 0xad, 0xfd, 0xae, 0x0c, 0xb7, 0xa6, 0x01, 0xc8, 0xed, 0xf4, 0xa2, 0x15, 0x88, 0xd5, 0xd2,
+	0xe6, 0xec, 0x3d, 0xcb, 0xdc, 0xa6, 0x82, 0xf1, 0x15, 0x9b, 0x99, 0x58, 0xb1, 0x1d, 0x98, 0x8b,
+	0x39, 0x0f, 0xc5, 0xea, 0x2c, 0x0e, 0xea, 0x9d, 0x57, 0x1d, 0xd4, 0xfd, 0x43, 0xce, 0x43, 0x47,
+	0xdb, 0xae, 0xfd, 0x6b, 0x06, 0xca, 0xaa, 0x4d, 0x7e, 0x94, 0xdb, 0x7e, 0x5a, 0x0f, 0xbf, 0x7d,
+	0x2d, 0x32, 0xfc, 0xa3, 0x4a, 0xbe, 0xd9, 0xb6, 0x8e, 0xc0, 0x12, 0x7d, 0x2f, 0x61, 0x51, 0x0f,
+	0x87, 0xdd, 0x7a, 0xf8, 0xc1, 0xf5, 0xe8, 0x8e, 0xb4, 0x31, 0x32, 0xa6, 0x4c, 0x2a, 0x31, 0xf5,
+	0x02, 0xea, 0x3d, 0x41, 0x37, 0x54, 0x9e, 0x53, 0x73, 0x74, 0xb7, 0x1c, 0xf5, 0x69, 0x6f, 0x43,
+	0x35, 0x1d, 0x0e, 0x01, 0xa8, 0x74, 0x9e, 0x3e, 0x73, 0xdb, 0xbb, 0x0b, 0x37, 0x48, 0x03, 0xaa,
+	0xdb, 0x4f, 0x9e, 0x74, 0x76, 0x54, 0xab, 0x44, 0x5a, 0x00, 0x1f, 0xef, 0x1d, 0x1c, 0x76, 0x9c,
+	0x63, 0xd5, 0x9e, 0x21, 0x75, 0xb0, 0x1e, 0x3f, 0xe9, 0x3c, 0x57, 0x8d, 0x59, 0xbb, 0x0f, 0xf5,
+	0xdc, 0x10, 0xc8, 0x32, 0x90, 0xdd, 0xbd, 0xdd, 0xf6, 0xce, 0xf6, 0xf1, 0xde, 0xae, 0x7b, 0xb8,
+	0xe7, 0xb8, 0xed, 0xa7, 0xc7, 0x8f, 0x17, 0x6e, 0x90, 0x37, 0x61, 0xfd, 0x68, 0x7f, 0xdb, 0xd9,
+	0xdb, 0x75, 0x1f, 0xfd, 0xcc, 0xdd, 0x7e, 0xf2, 0x04, 0xe5, 0xf8, 0x71, 0xbc, 0xb7, 0xb3, 0xbf,
+	0x50, 0x22, 0x9b, 0xb0, 0x31, 0x05, 0x70, 0xb4, 0x7d, 0xb0, 0xa7, 0x11, 0x33, 0xf6, 0xaf, 0x67,
+	0x01, 0x76, 0x42, 0x4f, 0x08, 0xd6, 0x65, 0x34, 0xc1, 0xfa, 0xe9, 0xca, 0x38, 0xab, 0x66, 0x73,
+	0xfc, 0x38, 0x66, 0x01, 0x59, 0x84, 0x39, 0xee, 0x9e, 0x65, 0x55, 0xb5, 0xcc, 0x3f, 0x61, 0x58,
+	0x6b, 0x99, 0xc6, 0x9a, 0x09, 0x61, 0x29, 0x96, 0x21, 0x56, 0x4f, 0x49, 0x99, 0x29, 0xec, 0x0a,
+	0x58, 0xdc, 0x8d, 0x4f, 0x98, 0x14, 0xa6, 0xc8, 0x56, 0xf8, 0xa1, 0x6a, 0x61, 0xfd, 0x34, 0x0a,
+	0xb3, 0xa3, 0x32, 0xad, 0xb8, 0x0d, 0x55, 0x2a, 0xfb, 0x7a, 0x5f, 0xd7, 0xa9, 0x6e, 0x51, 0xd9,
+	0x4f, 0xb7, 0xf5, 0x40, 0x48, 0x77, 0xe0, 0xf9, 0x98, 0xe2, 0x0d, 0xa7, 0x12, 0x08, 0x79, 0xe0,
+	0xf9, 0x4a, 0x21, 0x12, 0x1f, 0x15, 0x35, 0xad, 0x10, 0x89, 0xaf, 0x14, 0x2a, 0xc8, 0x63, 0xfd,
+	0x5e, 0x61, 0x72, 0xd9, 0x62, 0xf1, 0x21, 0xbe, 0x79, 0x2c, 0x81, 0xb2, 0x76, 0x59, 0x6c, 0x92,
+	0x77, 0x2e, 0x10, 0xb2, 0x1d, 0x2b, 0xb1, 0xa2, 0x62, 0xb1, 0xa9, 0x63, 0x73, 0x22, 0xf1, 0xdb,
+	0xb1, 0x22, 0x52, 0x62, 0x95, 0xdd, 0x26, 0x8f, 0x55, 0x8f, 0xaa, 0xc0, 0x29, 0x95, 0x22, 0x42,
+	0x95, 0x4e, 0x60, 0x35, 0x4a, 0x54, 0x6d, 0x42, 0x43, 0x5d, 0xd5, 0xa5, 0xd7, 0xd3, 0xfe, 0xcc,
+	0xeb, 0x54, 0x8a, 0x4f, 0xe5, 0xb1, 0x87, 0x2b, 0x6c, 0xff, 0x12, 0x6a, 0xea, 0xc8, 0xce, 0xa3,
+	0x9d, 0x01, 0x56, 0x0c, 0x2f, 0x08, 0x5c, 0x3e, 0x94, 0x34, 0x51, 0x46, 0xb8, 0x16, 0x55, 0xa7,
+	0xee, 0x05, 0x41, 0x47, 0xc9, 0x8e, 0xbd, 0x9e, 0xaa, 0x52, 0x09, 0x1d, 0xf0, 0x33, 0x9a, 0x83,
+	0xcd, 0x20, 0xac, 0xa5, 0xe5, 0x19, 0x72, 0x13, 0x1a, 0x32, 0xf1, 0x62, 0x57, 0x72, 0xb7, 0xcf,
+	0x85, 0x8e, 0xde, 0xaa, 0x03, 0x4a, 0x76, 0xcc, 0xf7, 0xb9, 0x90, 0xf6, 0x9f, 0x4b, 0x50, 0xd1,
+	0xbd, 0x93, 0xbb, 0x30, 0xeb, 0x0f, 0xd2, 0x1b, 0x35, 0x19, 0x5d, 0xd2, 0xd3, 0xb1, 0x39, 0x4a,
+	0x3d, 0x3d, 0x1c, 0x72, 0x4b, 0x3c, 0x5b, 0x58, 0xe2, 0x51, 0x4c, 0x95, 0xc7, 0x62, 0x4a, 0xc7,
+	0xc9, 0x5c, 0x31, 0x4e, 0xa6, 0x87, 0xc3, 0x28, 0xd8, 0xac, 0x5c, 0xb0, 0xd9, 0x7f, 0x9c, 0x85,
+	0xf2, 0xe3, 0x90, 0x9f, 0x63, 0xf5, 0xf7, 0xd5, 0x8d, 0xc4, 0xcd, 0x6f, 0xc7, 0xf3, 0x4e, 0x43,
+	0x4b, 0xdb, 0xd3, 0x8e, 0x07, 0xf3, 0xe9, 0xf1, 0x60, 0x09, 0x2a, 0xc3, 0x88, 0x29, 0x71, 0x5d,
+	0x8b, 0x87, 0x11, 0xbb, 0xea, 0x18, 0xb8, 0x0e, 0x58, 0x9b, 0xf5, 0x62, 0xea, 0xad, 0xb5, 0xaa,
+	0x04, 0x18, 0x9d, 0xb7, 0xa1, 0x9a, 0xee, 0x30, 0x18, 0x6b, 0xf3, 0x8e, 0x65, 0x76, 0x17, 0xf2,
+	0x16, 0xcc, 0x47, 0x54, 0x9e, 0xf3, 0xe4, 0x34, 0x1b, 0xe5, 0x1c, 0x22, 0x9a, 0x46, 0xdc, 0x9e,
+	0x76, 0x3c, 0xad, 0x20, 0x24, 0x77, 0x2a, 0x79, 0x0f, 0xc0, 0xcf, 0x52, 0xd6, 0xdc, 0x92, 0x17,
+	0xb3, 0xb5, 0x1a, 0x65, 0xb3, 0x93, 0x83, 0x91, 0xb7, 0xa1, 0xe2, 0xe1, 0x2a, 0x9a, 0xdb, 0xef,
+	0xfc, 0xd8, 0xe2, 0x3a, 0x46, 0x4d, 0xd6, 0xa0, 0x1a, 0x27, 0x8c, 0x27, 0x4c, 0xbe, 0xc0, 0x2c,
+	0x9a, 0x77, 0xb2, 0x76, 0xee, 0x98, 0xdb, 0x28, 0x1c, 0x73, 0x73, 0xe7, 0xab, 0x66, 0xfe, 0x7c,
+	0x65, 0x1f, 0x43, 0x63, 0xfc, 0x84, 0xa0, 0x8f, 0x39, 0xe9, 0x0a, 0x35, 0x9c, 0xaa, 0x16, 0xb4,
+	0x03, 0xf2, 0x36, 0xcc, 0x1b, 0xa5, 0x88, 0xa9, 0xcf, 0xba, 0xcc, 0x37, 0xc7, 0xa7, 0x96, 0x16,
+	0x1f, 0x19, 0xa9, 0xfd, 0xb7, 0x32, 0xb4, 0x8a, 0x4f, 0x55, 0x97, 0x9f, 0xc3, 0x6e, 0x43, 0x35,
+	0xb9, 0x70, 0x4f, 0x5e, 0x48, 0x2a, 0x90, 0xad, 0xe2, 0x58, 0xc9, 0xc5, 0x23, 0xd5, 0x54, 0xd3,
+	0x9c, 0x5c, 0xb8, 0x31, 0x1e, 0xe4, 0x74, 0xd0, 0x56, 0x9c, 0x5a, 0x72, 0xa1, 0x4f, 0x76, 0x02,
+	0x53, 0xec, 0xc2, 0x1d, 0xfa, 0x9e, 0xca, 0x6a, 0x03, 0x2a, 0x23, 0xa8, 0x95, 0x5c, 0x3c, 0x53,
+	0xe2, 0x22, 0x72, 0x50, 0x40, 0xce, 0xa5, 0xc8, 0x83, 0x49, 0xe4, 0x49, 0x01, 0x59, 0x49, 0x91,
+	0x8f, 0x26, 0x91, 0xfa, 0x8e, 0x9e, 0x22, 0xad, 0x14, 0x89, 0x77, 0xee, 0x14, 0x79, 0x1b, 0xaa,
+	0x32, 0xf5, 0xb0, 0xaa, 0x3d, 0x94, 0x23, 0x0f, 0xe5, 0xc8, 0xc3, 0x9a, 0xf6, 0x50, 0xe6, 0x3d,
+	0x94, 0xe3, 0x1e, 0x82, 0xee, 0x43, 0x4e, 0x78, 0x28, 0xc7, 0x3d, 0xac, 0xa7, 0xc8, 0x83, 0x49,
+	0x64, 0xd1, 0xc3, 0x46, 0x8a, 0x7c, 0x34, 0x89, 0x2c, 0x7a, 0xd8, 0x4c, 0x91, 0x05, 0x0f, 0x6d,
+	0x68, 0x26, 0x17, 0xae, 0x9f, 0xf8, 0x1a, 0x2d, 0xb0, 0xbe, 0x56, 0x9c, 0x7a, 0x72, 0xb1, 0x93,
+	0xf8, 0x88, 0x44, 0x57, 0x4f, 0x58, 0x9c, 0x02, 0xe6, 0xb5, 0xab, 0x27, 0x2c, 0x36, 0xea, 0x0d,
+	0xa8, 0x49, 0x36, 0xa0, 0x42, 0x7a, 0x83, 0x18, 0x4f, 0xba, 0x96, 0x33, 0x12, 0xa8, 0xeb, 0x7c,
+	0xab, 0xf8, 0x82, 0x99, 0x4f, 0xfe, 0x52, 0x21, 0xf9, 0xbf, 0x7e, 0x40, 0x7d, 0xfd, 0x85, 0xba,
+	0x7a, 0xf4, 0x1f, 0x41, 0xb3, 0xf0, 0xe4, 0x79, 0x79, 0x32, 0x2c, 0x43, 0x45, 0x5d, 0xd8, 0x87,
+	0xc2, 0x9c, 0xe6, 0x4c, 0xcb, 0xfe, 0x05, 0x2c, 0x4e, 0x79, 0xfa, 0xbc, 0xf6, 0x35, 0x6b, 0x44,
+	0x3f, 0x5b, 0xa0, 0xff, 0x47, 0x09, 0xc8, 0xe4, 0xab, 0xe8, 0xd7, 0x79, 0x32, 0x09, 0xb9, 0x70,
+	0x0b, 0x5d, 0xd4, 0x42, 0x2e, 0x8e, 0x50, 0xa0, 0xd5, 0x27, 0xa9, 0xba, 0x9c, 0xaa, 0x4f, 0x8c,
+	0xfa, 0x1e, 0x2c, 0x84, 0x3c, 0xf6, 0xdd, 0x01, 0x13, 0x19, 0x87, 0xbe, 0x85, 0xb4, 0x94, 0xfc,
+	0x80, 0x89, 0x94, 0xe8, 0x01, 0x2c, 0x19, 0xa4, 0x09, 0xb8, 0x14, 0x5e, 0xd1, 0x37, 0x1f, 0x0d,
+	0xd7, 0x81, 0xa7, 0x4d, 0x6c, 0x0a, 0xeb, 0x57, 0x3c, 0xd6, 0xbe, 0xb6, 0x89, 0xfc, 0x7d, 0x09,
+	0xd6, 0x2e, 0x7f, 0xb9, 0x7d, 0x5d, 0xdd, 0x90, 0xf7, 0x60, 0x99, 0x45, 0xea, 0xea, 0x48, 0xdd,
+	0x13, 0x26, 0xcd, 0x1c, 0x24, 0x9e, 0xa4, 0x66, 0x07, 0x5f, 0x34, 0xda, 0x47, 0x4c, 0xe2, 0x24,
+	0x38, 0x9e, 0xa4, 0xf6, 0x97, 0x7a, 0x6c, 0x97, 0x3c, 0xfc, 0xbe, 0xb6, 0xb1, 0xdd, 0x82, 0x39,
+	0x7c, 0x82, 0x4e, 0x0f, 0x13, 0xd8, 0x50, 0xec, 0x11, 0x3d, 0x77, 0xe9, 0xe7, 0xe9, 0x71, 0xa2,
+	0x12, 0xd1, 0xf3, 0xbd, 0xcf, 0x03, 0xbb, 0x0f, 0x77, 0xae, 0x7e, 0x36, 0x7e, 0x6d, 0x6b, 0xf3,
+	0x87, 0x92, 0x8e, 0x81, 0x4b, 0x1e, 0x92, 0xff, 0xb7, 0x8b, 0xf3, 0x45, 0x09, 0xec, 0x97, 0x3f,
+	0x4a, 0xff, 0x77, 0x17, 0xc9, 0x3e, 0xc4, 0xb5, 0xb8, 0xe2, 0xf1, 0xfa, 0xba, 0xfd, 0xdb, 0x4f,
+	0x61, 0xe3, 0xaa, 0xb7, 0xea, 0x6b, 0xf3, 0x59, 0x30, 0xb7, 0x37, 0x88, 0xe5, 0x8b, 0x87, 0x5f,
+	0x35, 0xc1, 0xea, 0xe8, 0xf3, 0x11, 0xd9, 0x05, 0xd8, 0x65, 0xc2, 0x3b, 0x09, 0x69, 0x27, 0x94,
+	0xa4, 0x95, 0x9d, 0x9b, 0x10, 0xb9, 0x36, 0xd6, 0xb6, 0x97, 0xbf, 0xf8, 0xfb, 0x3f, 0xbf, 0x9c,
+	0x59, 0xb0, 0xeb, 0x5b, 0x67, 0x0f, 0xb6, 0x8c, 0xdd, 0x87, 0xa5, 0x6f, 0x92, 0xc7, 0x50, 0x77,
+	0x28, 0x8d, 0x5e, 0x95, 0x66, 0x05, 0x69, 0x6e, 0xda, 0x0d, 0x45, 0x93, 0x1a, 0x2a, 0x9e, 0x3d,
+	0xa8, 0x9b, 0x19, 0xa4, 0x9d, 0x68, 0x48, 0x1a, 0xf9, 0x47, 0xfb, 0x09, 0x96, 0x55, 0x64, 0x21,
+	0x76, 0x53, 0xb1, 0xec, 0xe9, 0xce, 0xa3, 0xa1, 0xa2, 0xd9, 0x87, 0xe6, 0x2e, 0xf5, 0x5e, 0x99,
+	0xe8, 0x36, 0x12, 0x2d, 0xda, 0xad, 0x9c, 0x57, 0x86, 0x69, 0x07, 0x6a, 0xbb, 0x34, 0xa4, 0xd7,
+	0x1e, 0x4e, 0x66, 0xa4, 0x48, 0xda, 0x00, 0xe6, 0xad, 0xb0, 0x33, 0x94, 0x64, 0xa1, 0xf0, 0xef,
+	0xcf, 0x03, 0xd1, 0xbb, 0x7a, 0x3c, 0x23, 0x4b, 0x45, 0xd5, 0x81, 0x46, 0xf6, 0x50, 0xa8, 0xc8,
+	0x48, 0xe1, 0xdf, 0x1a, 0x28, 0x9e, 0xa0, 0x5b, 0x47, 0xba, 0x25, 0x7b, 0x01, 0xe9, 0x72, 0xd6,
+	0x8a, 0xf0, 0xa7, 0x30, 0x9f, 0x7f, 0xf2, 0x53, 0x9c, 0xa3, 0x17, 0xd1, 0xbc, 0x66, 0x82, 0xf6,
+	0x0e, 0xd2, 0xae, 0xda, 0x8b, 0x8a, 0x76, 0x8c, 0x43, 0x31, 0x7f, 0x04, 0x96, 0x3a, 0x75, 0x6c,
+	0x07, 0x01, 0x69, 0x16, 0xfe, 0x93, 0x7a, 0x75, 0x54, 0x19, 0x1b, 0x1d, 0x55, 0xa0, 0x5a, 0x0e,
+	0x5e, 0xf9, 0x5e, 0x46, 0x52, 0x98, 0xb4, 0x91, 0x99, 0xe2, 0x39, 0x82, 0x56, 0xf6, 0xde, 0xbc,
+	0xd3, 0xa7, 0xfe, 0xe9, 0x44, 0x80, 0x8e, 0xa6, 0x31, 0x03, 0xda, 0x6f, 0x20, 0xe1, 0x8a, 0x4d,
+	0x14, 0x61, 0xd1, 0x5e, 0x91, 0x1e, 0x40, 0x5d, 0xc7, 0xdc, 0x21, 0x8f, 0xda, 0xdd, 0xdc, 0x42,
+	0x64, 0x0f, 0xe0, 0x13, 0x43, 0x5c, 0x43, 0xc6, 0x5b, 0xf6, 0xfc, 0x28, 0x60, 0xd1, 0xd8, 0x2c,
+	0xac, 0x89, 0xbc, 0x57, 0xe7, 0x2b, 0x2c, 0x6c, 0xde, 0x5a, 0x11, 0x3a, 0xd0, 0xfc, 0x98, 0xca,
+	0xdc, 0x93, 0xec, 0xb8, 0xcf, 0x8b, 0x53, 0x5e, 0x8d, 0xec, 0x0d, 0xa4, 0x5c, 0xb6, 0x6f, 0x2a,
+	0xca, 0x82, 0xbd, 0xe2, 0xfc, 0x21, 0x54, 0x1c, 0x7a, 0xc2, 0xf9, 0xcb, 0x33, 0x7c, 0x09, 0x79,
+	0xe6, 0x6d, 0xd0, 0x19, 0xae, 0x6c, 0x14, 0xc1, 0x33, 0xb8, 0xb9, 0xc3, 0xc3, 0x90, 0xfa, 0xf9,
+	0xdb, 0xcd, 0xcb, 0xb8, 0x36, 0x91, 0x6b, 0xcd, 0x5e, 0x52, 0x5c, 0x13, 0xe6, 0x8a, 0x36, 0x81,
+	0x95, 0x9d, 0x84, 0x7a, 0x92, 0x1e, 0x27, 0x5e, 0xb7, 0xcb, 0xfc, 0x23, 0xbf, 0x4f, 0x83, 0x61,
+	0x48, 0x13, 0x41, 0xde, 0xbc, 0x5f, 0xf8, 0x05, 0xc7, 0x04, 0x60, 0xa2, 0xb7, 0xb7, 0xb0, 0xb7,
+	0x4d, 0x7b, 0x1d, 0x7b, 0x9b, 0xce, 0x6a, 0xfa, 0xd4, 0x11, 0xf6, 0xba, 0xfb, 0xbc, 0x84, 0x55,
+	0xf5, 0xd9, 0x85, 0xc5, 0xc2, 0x88, 0x7e, 0x32, 0xa4, 0x43, 0x2a, 0xc8, 0xfa, 0xd4, 0xfe, 0xb4,
+	0x72, 0xa2, 0x2f, 0x1b, 0xfb, 0xda, 0xb0, 0x57, 0x26, 0xfc, 0xd3, 0x06, 0xa6, 0x9f, 0xc2, 0x28,
+	0xfe, 0xe3, 0x7e, 0xa6, 0xb0, 0xa9, 0x7e, 0xbe, 0x07, 0x0b, 0x3a, 0x0d, 0x72, 0xbb, 0xda, 0xe5,
+	0x61, 0x3a, 0x02, 0xd9, 0x37, 0xde, 0x2d, 0x3d, 0x7a, 0xf0, 0xe9, 0x56, 0x8f, 0xc9, 0xfe, 0xf0,
+	0xe4, 0xbe, 0xcf, 0x07, 0xf8, 0x5b, 0x1f, 0x9f, 0x27, 0xc1, 0x96, 0xfe, 0xf9, 0xce, 0x3b, 0xe6,
+	0xe7, 0x3b, 0x67, 0x0f, 0xb7, 0x7a, 0x3c, 0xfd, 0x21, 0xd0, 0x61, 0xe9, 0xa4, 0x82, 0xf2, 0xf7,
+	0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x72, 0x27, 0xde, 0x4e, 0x2a, 0x24, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// OpenoltClient is the client API for Openolt service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type OpenoltClient interface {
+	DisableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	ReenableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	ActivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	DeactivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	DeleteOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	OmciMsgOut(ctx context.Context, in *OmciMsg, opts ...grpc.CallOption) (*Empty, error)
+	OnuPacketOut(ctx context.Context, in *OnuPacket, opts ...grpc.CallOption) (*Empty, error)
+	UplinkPacketOut(ctx context.Context, in *UplinkPacket, opts ...grpc.CallOption) (*Empty, error)
+	FlowAdd(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error)
+	FlowRemove(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error)
+	HeartbeatCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Heartbeat, error)
+	EnablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error)
+	DisablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error)
+	GetDeviceInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*DeviceInfo, error)
+	Reboot(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	CollectStatistics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	CreateTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error)
+	RemoveTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error)
+	CreateTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error)
+	RemoveTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error)
+	EnableIndication(ctx context.Context, in *Empty, opts ...grpc.CallOption) (Openolt_EnableIndicationClient, error)
+}
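(Editorial aside, not part of the generated file.) As the comment above notes, OpenoltClient is the caller-side view of the Openolt service. A hedged usage sketch follows; the listen address and the import path are assumptions made for illustration, not values taken from this repository:

	package main

	import (
		"context"
		"log"
		"time"

		"google.golang.org/grpc"

		openolt "example.com/vendored/openolt" // hypothetical import path; substitute your vendored package
	)

	func main() {
		conn, err := grpc.Dial("192.168.0.100:9191", grpc.WithInsecure()) // address is illustrative only
		if err != nil {
			log.Fatalf("dial: %v", err)
		}
		defer conn.Close()

		client := openolt.NewOpenoltClient(conn)

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// HeartbeatCheck is a unary RPC: Empty in, Heartbeat out.
		hb, err := client.HeartbeatCheck(ctx, &openolt.Empty{})
		if err != nil {
			log.Fatalf("HeartbeatCheck: %v", err)
		}
		log.Printf("heartbeat: %v", hb)
	}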
+
+type openoltClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewOpenoltClient(cc *grpc.ClientConn) OpenoltClient {
+	return &openoltClient{cc}
+}
+
+func (c *openoltClient) DisableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DisableOlt", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) ReenableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/ReenableOlt", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) ActivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/ActivateOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DeactivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DeactivateOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DeleteOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DeleteOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) OmciMsgOut(ctx context.Context, in *OmciMsg, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/OmciMsgOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) OnuPacketOut(ctx context.Context, in *OnuPacket, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/OnuPacketOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) UplinkPacketOut(ctx context.Context, in *UplinkPacket, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/UplinkPacketOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) FlowAdd(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/FlowAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) FlowRemove(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/FlowRemove", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) HeartbeatCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Heartbeat, error) {
+	out := new(Heartbeat)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/HeartbeatCheck", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) EnablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/EnablePonIf", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DisablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DisablePonIf", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) GetDeviceInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*DeviceInfo, error) {
+	out := new(DeviceInfo)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/GetDeviceInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) Reboot(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/Reboot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CollectStatistics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CollectStatistics", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CreateTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CreateTrafficSchedulers", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) RemoveTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/RemoveTrafficSchedulers", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CreateTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CreateTrafficQueues", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) RemoveTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/RemoveTrafficQueues", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) EnableIndication(ctx context.Context, in *Empty, opts ...grpc.CallOption) (Openolt_EnableIndicationClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Openolt_serviceDesc.Streams[0], "/openolt.Openolt/EnableIndication", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &openoltEnableIndicationClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Openolt_EnableIndicationClient interface {
+	Recv() (*Indication, error)
+	grpc.ClientStream
+}
+
+type openoltEnableIndicationClient struct {
+	grpc.ClientStream
+}
+
+func (x *openoltEnableIndicationClient) Recv() (*Indication, error) {
+	m := new(Indication)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
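(Editorial aside, not part of the generated file.) EnableIndication is the one server-streaming RPC in the service; the generated Recv above decodes each Indication from the stream. A hedged sketch of draining the stream until io.EOF, written as if in this package and assuming the standard context, io and log imports:

	func drainIndications(ctx context.Context, client OpenoltClient) error {
		stream, err := client.EnableIndication(ctx, &Empty{})
		if err != nil {
			return err
		}
		for {
			ind, err := stream.Recv()
			if err == io.EOF {
				return nil // server closed the stream cleanly
			}
			if err != nil {
				return err
			}
			// Each Indication carries one variant of its oneof; String() gives a compact text dump.
			log.Printf("indication: %s", ind.String())
		}
	}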
+
+// OpenoltServer is the server API for Openolt service.
+type OpenoltServer interface {
+	DisableOlt(context.Context, *Empty) (*Empty, error)
+	ReenableOlt(context.Context, *Empty) (*Empty, error)
+	ActivateOnu(context.Context, *Onu) (*Empty, error)
+	DeactivateOnu(context.Context, *Onu) (*Empty, error)
+	DeleteOnu(context.Context, *Onu) (*Empty, error)
+	OmciMsgOut(context.Context, *OmciMsg) (*Empty, error)
+	OnuPacketOut(context.Context, *OnuPacket) (*Empty, error)
+	UplinkPacketOut(context.Context, *UplinkPacket) (*Empty, error)
+	FlowAdd(context.Context, *Flow) (*Empty, error)
+	FlowRemove(context.Context, *Flow) (*Empty, error)
+	HeartbeatCheck(context.Context, *Empty) (*Heartbeat, error)
+	EnablePonIf(context.Context, *Interface) (*Empty, error)
+	DisablePonIf(context.Context, *Interface) (*Empty, error)
+	GetDeviceInfo(context.Context, *Empty) (*DeviceInfo, error)
+	Reboot(context.Context, *Empty) (*Empty, error)
+	CollectStatistics(context.Context, *Empty) (*Empty, error)
+	CreateTrafficSchedulers(context.Context, *tech_profile.TrafficSchedulers) (*Empty, error)
+	RemoveTrafficSchedulers(context.Context, *tech_profile.TrafficSchedulers) (*Empty, error)
+	CreateTrafficQueues(context.Context, *tech_profile.TrafficQueues) (*Empty, error)
+	RemoveTrafficQueues(context.Context, *tech_profile.TrafficQueues) (*Empty, error)
+	EnableIndication(*Empty, Openolt_EnableIndicationServer) error
+}
+
+func RegisterOpenoltServer(s *grpc.Server, srv OpenoltServer) {
+	s.RegisterService(&_Openolt_serviceDesc, srv)
+}
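(Editorial aside, not part of the generated file.) RegisterOpenoltServer attaches an implementation to a *grpc.Server. A minimal hedged sketch of serving it, assuming the standard net import; the impl argument is a hypothetical value that must satisfy every method of the OpenoltServer interface:

	func serveOpenolt(addr string, impl OpenoltServer) error {
		lis, err := net.Listen("tcp", addr)
		if err != nil {
			return err
		}
		s := grpc.NewServer()
		RegisterOpenoltServer(s, impl) // impl must implement all OpenoltServer methods
		return s.Serve(lis)
	}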
+
+func _Openolt_DisableOlt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DisableOlt(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DisableOlt",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DisableOlt(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_ReenableOlt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).ReenableOlt(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/ReenableOlt",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).ReenableOlt(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_ActivateOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).ActivateOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/ActivateOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).ActivateOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DeactivateOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DeactivateOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DeactivateOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DeactivateOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DeleteOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DeleteOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DeleteOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DeleteOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_OmciMsgOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OmciMsg)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).OmciMsgOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/OmciMsgOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).OmciMsgOut(ctx, req.(*OmciMsg))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_OnuPacketOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OnuPacket)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).OnuPacketOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/OnuPacketOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).OnuPacketOut(ctx, req.(*OnuPacket))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_UplinkPacketOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UplinkPacket)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).UplinkPacketOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/UplinkPacketOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).UplinkPacketOut(ctx, req.(*UplinkPacket))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_FlowAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Flow)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).FlowAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/FlowAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).FlowAdd(ctx, req.(*Flow))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_FlowRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Flow)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).FlowRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/FlowRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).FlowRemove(ctx, req.(*Flow))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_HeartbeatCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).HeartbeatCheck(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/HeartbeatCheck",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).HeartbeatCheck(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_EnablePonIf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Interface)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).EnablePonIf(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/EnablePonIf",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).EnablePonIf(ctx, req.(*Interface))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DisablePonIf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Interface)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DisablePonIf(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DisablePonIf",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DisablePonIf(ctx, req.(*Interface))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_GetDeviceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).GetDeviceInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/GetDeviceInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).GetDeviceInfo(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_Reboot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).Reboot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/Reboot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).Reboot(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CollectStatistics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CollectStatistics(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CollectStatistics",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CollectStatistics(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CreateTrafficSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficSchedulers)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CreateTrafficSchedulers(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CreateTrafficSchedulers",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CreateTrafficSchedulers(ctx, req.(*tech_profile.TrafficSchedulers))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_RemoveTrafficSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficSchedulers)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).RemoveTrafficSchedulers(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/RemoveTrafficSchedulers",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).RemoveTrafficSchedulers(ctx, req.(*tech_profile.TrafficSchedulers))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CreateTrafficQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficQueues)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CreateTrafficQueues(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CreateTrafficQueues",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CreateTrafficQueues(ctx, req.(*tech_profile.TrafficQueues))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_RemoveTrafficQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficQueues)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).RemoveTrafficQueues(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/RemoveTrafficQueues",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).RemoveTrafficQueues(ctx, req.(*tech_profile.TrafficQueues))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_EnableIndication_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(OpenoltServer).EnableIndication(m, &openoltEnableIndicationServer{stream})
+}
+
+type Openolt_EnableIndicationServer interface {
+	Send(*Indication) error
+	grpc.ServerStream
+}
+
+type openoltEnableIndicationServer struct {
+	grpc.ServerStream
+}
+
+func (x *openoltEnableIndicationServer) Send(m *Indication) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _Openolt_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "openolt.Openolt",
+	HandlerType: (*OpenoltServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "DisableOlt",
+			Handler:    _Openolt_DisableOlt_Handler,
+		},
+		{
+			MethodName: "ReenableOlt",
+			Handler:    _Openolt_ReenableOlt_Handler,
+		},
+		{
+			MethodName: "ActivateOnu",
+			Handler:    _Openolt_ActivateOnu_Handler,
+		},
+		{
+			MethodName: "DeactivateOnu",
+			Handler:    _Openolt_DeactivateOnu_Handler,
+		},
+		{
+			MethodName: "DeleteOnu",
+			Handler:    _Openolt_DeleteOnu_Handler,
+		},
+		{
+			MethodName: "OmciMsgOut",
+			Handler:    _Openolt_OmciMsgOut_Handler,
+		},
+		{
+			MethodName: "OnuPacketOut",
+			Handler:    _Openolt_OnuPacketOut_Handler,
+		},
+		{
+			MethodName: "UplinkPacketOut",
+			Handler:    _Openolt_UplinkPacketOut_Handler,
+		},
+		{
+			MethodName: "FlowAdd",
+			Handler:    _Openolt_FlowAdd_Handler,
+		},
+		{
+			MethodName: "FlowRemove",
+			Handler:    _Openolt_FlowRemove_Handler,
+		},
+		{
+			MethodName: "HeartbeatCheck",
+			Handler:    _Openolt_HeartbeatCheck_Handler,
+		},
+		{
+			MethodName: "EnablePonIf",
+			Handler:    _Openolt_EnablePonIf_Handler,
+		},
+		{
+			MethodName: "DisablePonIf",
+			Handler:    _Openolt_DisablePonIf_Handler,
+		},
+		{
+			MethodName: "GetDeviceInfo",
+			Handler:    _Openolt_GetDeviceInfo_Handler,
+		},
+		{
+			MethodName: "Reboot",
+			Handler:    _Openolt_Reboot_Handler,
+		},
+		{
+			MethodName: "CollectStatistics",
+			Handler:    _Openolt_CollectStatistics_Handler,
+		},
+		{
+			MethodName: "CreateTrafficSchedulers",
+			Handler:    _Openolt_CreateTrafficSchedulers_Handler,
+		},
+		{
+			MethodName: "RemoveTrafficSchedulers",
+			Handler:    _Openolt_RemoveTrafficSchedulers_Handler,
+		},
+		{
+			MethodName: "CreateTrafficQueues",
+			Handler:    _Openolt_CreateTrafficQueues_Handler,
+		},
+		{
+			MethodName: "RemoveTrafficQueues",
+			Handler:    _Openolt_RemoveTrafficQueues_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "EnableIndication",
+			Handler:       _Openolt_EnableIndication_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "voltha_protos/openolt.proto",
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v2/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/v2/go/tech_profile/tech_profile.pb.go
new file mode 100644
index 0000000..5a78e1e
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v2/go/tech_profile/tech_profile.pb.go
@@ -0,0 +1,972 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/tech_profile.proto
+
+package tech_profile
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Direction int32
+
+const (
+	Direction_UPSTREAM      Direction = 0
+	Direction_DOWNSTREAM    Direction = 1
+	Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[int32]string{
+	0: "UPSTREAM",
+	1: "DOWNSTREAM",
+	2: "BIDIRECTIONAL",
+}
+
+var Direction_value = map[string]int32{
+	"UPSTREAM":      0,
+	"DOWNSTREAM":    1,
+	"BIDIRECTIONAL": 2,
+}
+
+func (x Direction) String() string {
+	return proto.EnumName(Direction_name, int32(x))
+}
+
+func (Direction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{0}
+}
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[int32]string{
+	0: "WRR",
+	1: "StrictPriority",
+	2: "Hybrid",
+}
+
+var SchedulingPolicy_value = map[string]int32{
+	"WRR":            0,
+	"StrictPriority": 1,
+	"Hybrid":         2,
+}
+
+func (x SchedulingPolicy) String() string {
+	return proto.EnumName(SchedulingPolicy_name, int32(x))
+}
+
+func (SchedulingPolicy) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{1}
+}
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[int32]string{
+	0: "AdditionalBW_None",
+	1: "AdditionalBW_NA",
+	2: "AdditionalBW_BestEffort",
+	3: "AdditionalBW_Auto",
+}
+
+var AdditionalBW_value = map[string]int32{
+	"AdditionalBW_None":       0,
+	"AdditionalBW_NA":         1,
+	"AdditionalBW_BestEffort": 2,
+	"AdditionalBW_Auto":       3,
+}
+
+func (x AdditionalBW) String() string {
+	return proto.EnumName(AdditionalBW_name, int32(x))
+}
+
+func (AdditionalBW) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{2}
+}
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[int32]string{
+	0: "TailDrop",
+	1: "WTailDrop",
+	2: "Red",
+	3: "WRed",
+}
+
+var DiscardPolicy_value = map[string]int32{
+	"TailDrop":  0,
+	"WTailDrop": 1,
+	"Red":       2,
+	"WRed":      3,
+}
+
+func (x DiscardPolicy) String() string {
+	return proto.EnumName(DiscardPolicy_name, int32(x))
+}
+
+func (DiscardPolicy) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{3}
+}
+
+type InferredAdditionBWIndication int32
+
+const (
+	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
+	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
+	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+	0: "InferredAdditionBWIndication_None",
+	1: "InferredAdditionBWIndication_Assured",
+	2: "InferredAdditionBWIndication_BestEffort",
+}
+
+var InferredAdditionBWIndication_value = map[string]int32{
+	"InferredAdditionBWIndication_None":       0,
+	"InferredAdditionBWIndication_Assured":    1,
+	"InferredAdditionBWIndication_BestEffort": 2,
+}
+
+func (x InferredAdditionBWIndication) String() string {
+	return proto.EnumName(InferredAdditionBWIndication_name, int32(x))
+}
+
+func (InferredAdditionBWIndication) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{4}
+}
+
+type SchedulerConfig struct {
+	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	AdditionalBw         AdditionalBW     `protobuf:"varint,2,opt,name=additional_bw,json=additionalBw,proto3,enum=tech_profile.AdditionalBW" json:"additional_bw,omitempty"`
+	Priority             uint32           `protobuf:"fixed32,3,opt,name=priority,proto3" json:"priority,omitempty"`
+	Weight               uint32           `protobuf:"fixed32,4,opt,name=weight,proto3" json:"weight,omitempty"`
+	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SchedulerConfig) Reset()         { *m = SchedulerConfig{} }
+func (m *SchedulerConfig) String() string { return proto.CompactTextString(m) }
+func (*SchedulerConfig) ProtoMessage()    {}
+func (*SchedulerConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{0}
+}
+
+func (m *SchedulerConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SchedulerConfig.Unmarshal(m, b)
+}
+func (m *SchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SchedulerConfig.Marshal(b, m, deterministic)
+}
+func (m *SchedulerConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SchedulerConfig.Merge(m, src)
+}
+func (m *SchedulerConfig) XXX_Size() int {
+	return xxx_messageInfo_SchedulerConfig.Size(m)
+}
+func (m *SchedulerConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_SchedulerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SchedulerConfig proto.InternalMessageInfo
+
+func (m *SchedulerConfig) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *SchedulerConfig) GetAdditionalBw() AdditionalBW {
+	if m != nil {
+		return m.AdditionalBw
+	}
+	return AdditionalBW_AdditionalBW_None
+}
+
+func (m *SchedulerConfig) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *SchedulerConfig) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *SchedulerConfig) GetSchedPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+type TrafficShapingInfo struct {
+	Cir                  uint32                       `protobuf:"fixed32,1,opt,name=cir,proto3" json:"cir,omitempty"`
+	Cbs                  uint32                       `protobuf:"fixed32,2,opt,name=cbs,proto3" json:"cbs,omitempty"`
+	Pir                  uint32                       `protobuf:"fixed32,3,opt,name=pir,proto3" json:"pir,omitempty"`
+	Pbs                  uint32                       `protobuf:"fixed32,4,opt,name=pbs,proto3" json:"pbs,omitempty"`
+	Gir                  uint32                       `protobuf:"fixed32,5,opt,name=gir,proto3" json:"gir,omitempty"`
+	AddBwInd             InferredAdditionBWIndication `protobuf:"varint,6,opt,name=add_bw_ind,json=addBwInd,proto3,enum=tech_profile.InferredAdditionBWIndication" json:"add_bw_ind,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *TrafficShapingInfo) Reset()         { *m = TrafficShapingInfo{} }
+func (m *TrafficShapingInfo) String() string { return proto.CompactTextString(m) }
+func (*TrafficShapingInfo) ProtoMessage()    {}
+func (*TrafficShapingInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{1}
+}
+
+func (m *TrafficShapingInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficShapingInfo.Unmarshal(m, b)
+}
+func (m *TrafficShapingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficShapingInfo.Marshal(b, m, deterministic)
+}
+func (m *TrafficShapingInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficShapingInfo.Merge(m, src)
+}
+func (m *TrafficShapingInfo) XXX_Size() int {
+	return xxx_messageInfo_TrafficShapingInfo.Size(m)
+}
+func (m *TrafficShapingInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficShapingInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficShapingInfo proto.InternalMessageInfo
+
+func (m *TrafficShapingInfo) GetCir() uint32 {
+	if m != nil {
+		return m.Cir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetCbs() uint32 {
+	if m != nil {
+		return m.Cbs
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetPir() uint32 {
+	if m != nil {
+		return m.Pir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetPbs() uint32 {
+	if m != nil {
+		return m.Pbs
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetGir() uint32 {
+	if m != nil {
+		return m.Gir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetAddBwInd() InferredAdditionBWIndication {
+	if m != nil {
+		return m.AddBwInd
+	}
+	return InferredAdditionBWIndication_InferredAdditionBWIndication_None
+}
+
+type TrafficScheduler struct {
+	Direction            Direction           `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	AllocId              uint32              `protobuf:"fixed32,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	Scheduler            *SchedulerConfig    `protobuf:"bytes,3,opt,name=scheduler,proto3" json:"scheduler,omitempty"`
+	TrafficShapingInfo   *TrafficShapingInfo `protobuf:"bytes,4,opt,name=traffic_shaping_info,json=trafficShapingInfo,proto3" json:"traffic_shaping_info,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *TrafficScheduler) Reset()         { *m = TrafficScheduler{} }
+func (m *TrafficScheduler) String() string { return proto.CompactTextString(m) }
+func (*TrafficScheduler) ProtoMessage()    {}
+func (*TrafficScheduler) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{2}
+}
+
+func (m *TrafficScheduler) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficScheduler.Unmarshal(m, b)
+}
+func (m *TrafficScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficScheduler.Marshal(b, m, deterministic)
+}
+func (m *TrafficScheduler) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficScheduler.Merge(m, src)
+}
+func (m *TrafficScheduler) XXX_Size() int {
+	return xxx_messageInfo_TrafficScheduler.Size(m)
+}
+func (m *TrafficScheduler) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficScheduler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficScheduler proto.InternalMessageInfo
+
+func (m *TrafficScheduler) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *TrafficScheduler) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *TrafficScheduler) GetScheduler() *SchedulerConfig {
+	if m != nil {
+		return m.Scheduler
+	}
+	return nil
+}
+
+func (m *TrafficScheduler) GetTrafficShapingInfo() *TrafficShapingInfo {
+	if m != nil {
+		return m.TrafficShapingInfo
+	}
+	return nil
+}
+
+type TrafficSchedulers struct {
+	IntfId               uint32              `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32              `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32              `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	PortNo               uint32              `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	TrafficScheds        []*TrafficScheduler `protobuf:"bytes,3,rep,name=traffic_scheds,json=trafficScheds,proto3" json:"traffic_scheds,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *TrafficSchedulers) Reset()         { *m = TrafficSchedulers{} }
+func (m *TrafficSchedulers) String() string { return proto.CompactTextString(m) }
+func (*TrafficSchedulers) ProtoMessage()    {}
+func (*TrafficSchedulers) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{3}
+}
+
+func (m *TrafficSchedulers) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficSchedulers.Unmarshal(m, b)
+}
+func (m *TrafficSchedulers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficSchedulers.Marshal(b, m, deterministic)
+}
+func (m *TrafficSchedulers) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficSchedulers.Merge(m, src)
+}
+func (m *TrafficSchedulers) XXX_Size() int {
+	return xxx_messageInfo_TrafficSchedulers.Size(m)
+}
+func (m *TrafficSchedulers) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficSchedulers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficSchedulers proto.InternalMessageInfo
+
+func (m *TrafficSchedulers) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetTrafficScheds() []*TrafficScheduler {
+	if m != nil {
+		return m.TrafficScheds
+	}
+	return nil
+}
+
+type TailDropDiscardConfig struct {
+	QueueSize            uint32   `protobuf:"fixed32,1,opt,name=queue_size,json=queueSize,proto3" json:"queue_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TailDropDiscardConfig) Reset()         { *m = TailDropDiscardConfig{} }
+func (m *TailDropDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*TailDropDiscardConfig) ProtoMessage()    {}
+func (*TailDropDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{4}
+}
+
+func (m *TailDropDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TailDropDiscardConfig.Unmarshal(m, b)
+}
+func (m *TailDropDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TailDropDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *TailDropDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TailDropDiscardConfig.Merge(m, src)
+}
+func (m *TailDropDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_TailDropDiscardConfig.Size(m)
+}
+func (m *TailDropDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_TailDropDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailDropDiscardConfig proto.InternalMessageInfo
+
+func (m *TailDropDiscardConfig) GetQueueSize() uint32 {
+	if m != nil {
+		return m.QueueSize
+	}
+	return 0
+}
+
+type RedDiscardConfig struct {
+	MinThreshold         uint32   `protobuf:"fixed32,1,opt,name=min_threshold,json=minThreshold,proto3" json:"min_threshold,omitempty"`
+	MaxThreshold         uint32   `protobuf:"fixed32,2,opt,name=max_threshold,json=maxThreshold,proto3" json:"max_threshold,omitempty"`
+	MaxProbability       uint32   `protobuf:"fixed32,3,opt,name=max_probability,json=maxProbability,proto3" json:"max_probability,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RedDiscardConfig) Reset()         { *m = RedDiscardConfig{} }
+func (m *RedDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*RedDiscardConfig) ProtoMessage()    {}
+func (*RedDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{5}
+}
+
+func (m *RedDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RedDiscardConfig.Unmarshal(m, b)
+}
+func (m *RedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RedDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *RedDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RedDiscardConfig.Merge(m, src)
+}
+func (m *RedDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_RedDiscardConfig.Size(m)
+}
+func (m *RedDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_RedDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RedDiscardConfig proto.InternalMessageInfo
+
+func (m *RedDiscardConfig) GetMinThreshold() uint32 {
+	if m != nil {
+		return m.MinThreshold
+	}
+	return 0
+}
+
+func (m *RedDiscardConfig) GetMaxThreshold() uint32 {
+	if m != nil {
+		return m.MaxThreshold
+	}
+	return 0
+}
+
+func (m *RedDiscardConfig) GetMaxProbability() uint32 {
+	if m != nil {
+		return m.MaxProbability
+	}
+	return 0
+}
+
+type WRedDiscardConfig struct {
+	Green                *RedDiscardConfig `protobuf:"bytes,1,opt,name=green,proto3" json:"green,omitempty"`
+	Yellow               *RedDiscardConfig `protobuf:"bytes,2,opt,name=yellow,proto3" json:"yellow,omitempty"`
+	Red                  *RedDiscardConfig `protobuf:"bytes,3,opt,name=red,proto3" json:"red,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *WRedDiscardConfig) Reset()         { *m = WRedDiscardConfig{} }
+func (m *WRedDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*WRedDiscardConfig) ProtoMessage()    {}
+func (*WRedDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{6}
+}
+
+func (m *WRedDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_WRedDiscardConfig.Unmarshal(m, b)
+}
+func (m *WRedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_WRedDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *WRedDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WRedDiscardConfig.Merge(m, src)
+}
+func (m *WRedDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_WRedDiscardConfig.Size(m)
+}
+func (m *WRedDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_WRedDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WRedDiscardConfig proto.InternalMessageInfo
+
+func (m *WRedDiscardConfig) GetGreen() *RedDiscardConfig {
+	if m != nil {
+		return m.Green
+	}
+	return nil
+}
+
+func (m *WRedDiscardConfig) GetYellow() *RedDiscardConfig {
+	if m != nil {
+		return m.Yellow
+	}
+	return nil
+}
+
+func (m *WRedDiscardConfig) GetRed() *RedDiscardConfig {
+	if m != nil {
+		return m.Red
+	}
+	return nil
+}
+
+type DiscardConfig struct {
+	DiscardPolicy DiscardPolicy `protobuf:"varint,1,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	// Types that are valid to be assigned to DiscardConfig:
+	//	*DiscardConfig_TailDropDiscardConfig
+	//	*DiscardConfig_RedDiscardConfig
+	//	*DiscardConfig_WredDiscardConfig
+	DiscardConfig        isDiscardConfig_DiscardConfig `protobuf_oneof:"discard_config"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *DiscardConfig) Reset()         { *m = DiscardConfig{} }
+func (m *DiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*DiscardConfig) ProtoMessage()    {}
+func (*DiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{7}
+}
+
+func (m *DiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DiscardConfig.Unmarshal(m, b)
+}
+func (m *DiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *DiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DiscardConfig.Merge(m, src)
+}
+func (m *DiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_DiscardConfig.Size(m)
+}
+func (m *DiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_DiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DiscardConfig proto.InternalMessageInfo
+
+func (m *DiscardConfig) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+type isDiscardConfig_DiscardConfig interface {
+	isDiscardConfig_DiscardConfig()
+}
+
+type DiscardConfig_TailDropDiscardConfig struct {
+	TailDropDiscardConfig *TailDropDiscardConfig `protobuf:"bytes,2,opt,name=tail_drop_discard_config,json=tailDropDiscardConfig,proto3,oneof"`
+}
+
+type DiscardConfig_RedDiscardConfig struct {
+	RedDiscardConfig *RedDiscardConfig `protobuf:"bytes,3,opt,name=red_discard_config,json=redDiscardConfig,proto3,oneof"`
+}
+
+type DiscardConfig_WredDiscardConfig struct {
+	WredDiscardConfig *WRedDiscardConfig `protobuf:"bytes,4,opt,name=wred_discard_config,json=wredDiscardConfig,proto3,oneof"`
+}
+
+func (*DiscardConfig_TailDropDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (*DiscardConfig_RedDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (*DiscardConfig_WredDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (m *DiscardConfig) GetDiscardConfig() isDiscardConfig_DiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetTailDropDiscardConfig() *TailDropDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_TailDropDiscardConfig); ok {
+		return x.TailDropDiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetRedDiscardConfig() *RedDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_RedDiscardConfig); ok {
+		return x.RedDiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetWredDiscardConfig() *WRedDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_WredDiscardConfig); ok {
+		return x.WredDiscardConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DiscardConfig) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*DiscardConfig_TailDropDiscardConfig)(nil),
+		(*DiscardConfig_RedDiscardConfig)(nil),
+		(*DiscardConfig_WredDiscardConfig)(nil),
+	}
+}
+
+type TrafficQueue struct {
+	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	GemportId            uint32           `protobuf:"fixed32,2,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	PbitMap              string           `protobuf:"bytes,3,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+	AesEncryption        bool             `protobuf:"varint,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
+	Priority             uint32           `protobuf:"fixed32,6,opt,name=priority,proto3" json:"priority,omitempty"`
+	Weight               uint32           `protobuf:"fixed32,7,opt,name=weight,proto3" json:"weight,omitempty"`
+	DiscardPolicy        DiscardPolicy    `protobuf:"varint,8,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	DiscardConfig        *DiscardConfig   `protobuf:"bytes,9,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *TrafficQueue) Reset()         { *m = TrafficQueue{} }
+func (m *TrafficQueue) String() string { return proto.CompactTextString(m) }
+func (*TrafficQueue) ProtoMessage()    {}
+func (*TrafficQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{8}
+}
+
+func (m *TrafficQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficQueue.Unmarshal(m, b)
+}
+func (m *TrafficQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficQueue.Marshal(b, m, deterministic)
+}
+func (m *TrafficQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficQueue.Merge(m, src)
+}
+func (m *TrafficQueue) XXX_Size() int {
+	return xxx_messageInfo_TrafficQueue.Size(m)
+}
+func (m *TrafficQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficQueue proto.InternalMessageInfo
+
+func (m *TrafficQueue) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *TrafficQueue) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetPbitMap() string {
+	if m != nil {
+		return m.PbitMap
+	}
+	return ""
+}
+
+func (m *TrafficQueue) GetAesEncryption() bool {
+	if m != nil {
+		return m.AesEncryption
+	}
+	return false
+}
+
+func (m *TrafficQueue) GetSchedPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+func (m *TrafficQueue) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+func (m *TrafficQueue) GetDiscardConfig() *DiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+type TrafficQueues struct {
+	IntfId               uint32          `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32          `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32          `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	PortNo               uint32          `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	TrafficQueues        []*TrafficQueue `protobuf:"bytes,6,rep,name=traffic_queues,json=trafficQueues,proto3" json:"traffic_queues,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *TrafficQueues) Reset()         { *m = TrafficQueues{} }
+func (m *TrafficQueues) String() string { return proto.CompactTextString(m) }
+func (*TrafficQueues) ProtoMessage()    {}
+func (*TrafficQueues) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{9}
+}
+
+func (m *TrafficQueues) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficQueues.Unmarshal(m, b)
+}
+func (m *TrafficQueues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficQueues.Marshal(b, m, deterministic)
+}
+func (m *TrafficQueues) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficQueues.Merge(m, src)
+}
+func (m *TrafficQueues) XXX_Size() int {
+	return xxx_messageInfo_TrafficQueues.Size(m)
+}
+func (m *TrafficQueues) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficQueues.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficQueues proto.InternalMessageInfo
+
+func (m *TrafficQueues) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetTrafficQueues() []*TrafficQueue {
+	if m != nil {
+		return m.TrafficQueues
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("tech_profile.Direction", Direction_name, Direction_value)
+	proto.RegisterEnum("tech_profile.SchedulingPolicy", SchedulingPolicy_name, SchedulingPolicy_value)
+	proto.RegisterEnum("tech_profile.AdditionalBW", AdditionalBW_name, AdditionalBW_value)
+	proto.RegisterEnum("tech_profile.DiscardPolicy", DiscardPolicy_name, DiscardPolicy_value)
+	proto.RegisterEnum("tech_profile.InferredAdditionBWIndication", InferredAdditionBWIndication_name, InferredAdditionBWIndication_value)
+	proto.RegisterType((*SchedulerConfig)(nil), "tech_profile.SchedulerConfig")
+	proto.RegisterType((*TrafficShapingInfo)(nil), "tech_profile.TrafficShapingInfo")
+	proto.RegisterType((*TrafficScheduler)(nil), "tech_profile.TrafficScheduler")
+	proto.RegisterType((*TrafficSchedulers)(nil), "tech_profile.TrafficSchedulers")
+	proto.RegisterType((*TailDropDiscardConfig)(nil), "tech_profile.TailDropDiscardConfig")
+	proto.RegisterType((*RedDiscardConfig)(nil), "tech_profile.RedDiscardConfig")
+	proto.RegisterType((*WRedDiscardConfig)(nil), "tech_profile.WRedDiscardConfig")
+	proto.RegisterType((*DiscardConfig)(nil), "tech_profile.DiscardConfig")
+	proto.RegisterType((*TrafficQueue)(nil), "tech_profile.TrafficQueue")
+	proto.RegisterType((*TrafficQueues)(nil), "tech_profile.TrafficQueues")
+}
+
+func init() { proto.RegisterFile("voltha_protos/tech_profile.proto", fileDescriptor_d019a68bffe14cae) }
+
+var fileDescriptor_d019a68bffe14cae = []byte{
+	// 1106 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
+	0x14, 0xf6, 0xda, 0x8d, 0x7f, 0x4e, 0x6c, 0x77, 0x33, 0x25, 0xc4, 0xa4, 0x0d, 0x04, 0x97, 0xaa,
+	0x91, 0x11, 0x31, 0x0a, 0x25, 0x37, 0x45, 0xaa, 0xec, 0x26, 0x52, 0x2c, 0xd1, 0x34, 0xdd, 0x04,
+	0x59, 0xe2, 0x82, 0xd5, 0x78, 0x67, 0x6c, 0x8f, 0xb4, 0x9e, 0x59, 0x66, 0xc7, 0x75, 0xd2, 0x2b,
+	0x6e, 0x78, 0x0b, 0x6e, 0x79, 0x01, 0xb8, 0x41, 0x3c, 0x11, 0x2f, 0xc0, 0x3d, 0x9a, 0xd9, 0x5d,
+	0xdb, 0xbb, 0x36, 0x29, 0x54, 0x70, 0x37, 0xe7, 0xdb, 0x6f, 0xce, 0x9c, 0x6f, 0xce, 0xcf, 0x0e,
+	0xec, 0xbf, 0x16, 0xbe, 0x1a, 0x63, 0x37, 0x90, 0x42, 0x89, 0xb0, 0xad, 0xa8, 0x37, 0xd6, 0xeb,
+	0x21, 0xf3, 0xe9, 0xa1, 0xc1, 0x50, 0x75, 0x19, 0xdb, 0x7d, 0x30, 0x12, 0x62, 0xe4, 0xd3, 0x36,
+	0x0e, 0x58, 0x1b, 0x73, 0x2e, 0x14, 0x56, 0x4c, 0xf0, 0x30, 0xe2, 0x36, 0x7f, 0xc8, 0xc3, 0xdd,
+	0x4b, 0x6f, 0x4c, 0xc9, 0xd4, 0xa7, 0xf2, 0xb9, 0xe0, 0x43, 0x36, 0x42, 0x5f, 0x42, 0x85, 0x30,
+	0x49, 0x3d, 0xcd, 0x6b, 0x58, 0xfb, 0xd6, 0x41, 0xfd, 0x68, 0xe7, 0x30, 0x75, 0xce, 0x49, 0xf2,
+	0xd9, 0x59, 0x30, 0xd1, 0x33, 0xa8, 0x61, 0x42, 0x98, 0x5e, 0x63, 0xdf, 0x1d, 0xcc, 0x1a, 0x79,
+	0xb3, 0x75, 0x37, 0xbd, 0xb5, 0x33, 0xa7, 0x74, 0xfb, 0x4e, 0x75, 0xb1, 0xa1, 0x3b, 0x43, 0xbb,
+	0x50, 0x0e, 0x24, 0x13, 0x92, 0xa9, 0x9b, 0x46, 0x61, 0xdf, 0x3a, 0x28, 0x39, 0x73, 0x1b, 0xbd,
+	0x0f, 0xc5, 0x19, 0x65, 0xa3, 0xb1, 0x6a, 0xdc, 0x31, 0x5f, 0x62, 0x0b, 0x75, 0xa0, 0x1a, 0xea,
+	0xf0, 0xdd, 0x40, 0xf8, 0xcc, 0xbb, 0x69, 0x6c, 0x98, 0x33, 0x3f, 0x4c, 0x9f, 0x19, 0x0b, 0x64,
+	0x7c, 0x74, 0x61, 0x58, 0xce, 0xa6, 0xd9, 0x13, 0x19, 0xcd, 0xdf, 0x2c, 0x40, 0x57, 0x12, 0x0f,
+	0x87, 0xcc, 0xbb, 0x1c, 0xe3, 0x80, 0xf1, 0x51, 0x8f, 0x0f, 0x05, 0xb2, 0xa1, 0xe0, 0x31, 0x69,
+	0xf4, 0x97, 0x1c, 0xbd, 0x34, 0xc8, 0x20, 0x34, 0xb2, 0x34, 0x32, 0x08, 0x35, 0x12, 0x30, 0x19,
+	0x07, 0xab, 0x97, 0x06, 0x19, 0x84, 0x71, 0x90, 0x7a, 0xa9, 0x91, 0x11, 0x93, 0x26, 0xb0, 0x92,
+	0xa3, 0x97, 0xe8, 0x0c, 0x00, 0x13, 0xe2, 0x0e, 0x66, 0x2e, 0xe3, 0xa4, 0x51, 0x34, 0x11, 0xb7,
+	0xd2, 0x11, 0xf7, 0xf8, 0x90, 0x4a, 0x49, 0x49, 0x72, 0x5b, 0xdd, 0x7e, 0x8f, 0x13, 0xe6, 0x99,
+	0xd4, 0x39, 0x65, 0x4c, 0x48, 0x77, 0xd6, 0xe3, 0xa4, 0xf9, 0xa7, 0x05, 0x76, 0x12, 0x7a, 0x92,
+	0xc4, 0x77, 0x4d, 0xdf, 0x07, 0x50, 0xc6, 0xbe, 0x2f, 0x3c, 0x97, 0x91, 0x58, 0x62, 0xc9, 0xd8,
+	0x3d, 0x82, 0x9e, 0x42, 0x25, 0x4c, 0xdc, 0x1b, 0xb1, 0x9b, 0x47, 0x7b, 0x6b, 0x6f, 0x38, 0x29,
+	0x21, 0x67, 0xc1, 0x47, 0x0e, 0xbc, 0xa7, 0xa2, 0x10, 0xdd, 0x30, 0xba, 0x5e, 0x97, 0xf1, 0xa1,
+	0x30, 0x57, 0xb4, 0x79, 0xb4, 0x9f, 0xf6, 0xb3, 0x9a, 0x07, 0x07, 0xa9, 0x15, 0xac, 0xf9, 0xbb,
+	0x05, 0x5b, 0x59, 0xdd, 0x21, 0xda, 0x81, 0x12, 0xe3, 0x6a, 0xa8, 0x05, 0x44, 0x59, 0x2b, 0x6a,
+	0xb3, 0x47, 0xd0, 0x36, 0x14, 0x05, 0x9f, 0x2e, 0x84, 0x6d, 0x08, 0x3e, 0x8d, 0xe0, 0x29, 0x67,
+	0x1a, 0x8e, 0xd2, 0xb5, 0x31, 0xe5, 0xac, 0x47, 0xb4, 0x9b, 0x40, 0x48, 0xe5, 0x72, 0x11, 0x27,
+	0xad, 0xa8, 0xcd, 0x73, 0x81, 0x4e, 0xa1, 0x3e, 0x57, 0xa2, 0x4f, 0x0d, 0x1b, 0x85, 0xfd, 0xc2,
+	0xc1, 0x66, 0xb6, 0xda, 0xb2, 0x81, 0x39, 0x35, 0xb5, 0x84, 0x84, 0xcd, 0x63, 0xd8, 0xbe, 0xc2,
+	0xcc, 0x3f, 0x91, 0x22, 0x38, 0x61, 0xa1, 0x87, 0x25, 0x89, 0xfb, 0x6e, 0x0f, 0xe0, 0xfb, 0x29,
+	0x9d, 0x52, 0x37, 0x64, 0x6f, 0x68, 0x2c, 0xa1, 0x62, 0x90, 0x4b, 0xf6, 0x86, 0x36, 0x7f, 0xb4,
+	0xc0, 0x76, 0x28, 0x49, 0xef, 0x79, 0x08, 0xb5, 0x09, 0xe3, 0xae, 0x1a, 0x4b, 0x1a, 0x8e, 0x85,
+	0x9f, 0x28, 0xaf, 0x4e, 0x18, 0xbf, 0x4a, 0x30, 0x43, 0xc2, 0xd7, 0x4b, 0xa4, 0x7c, 0x4c, 0xc2,
+	0xd7, 0x0b, 0xd2, 0x63, 0xb8, 0xab, 0x49, 0x81, 0x14, 0x03, 0x3c, 0x60, 0xfe, 0xa2, 0x09, 0xeb,
+	0x13, 0x7c, 0x7d, 0xb1, 0x40, 0x9b, 0xbf, 0x5a, 0xb0, 0xd5, 0x5f, 0x09, 0xe4, 0x09, 0x6c, 0x8c,
+	0x24, 0xa5, 0x51, 0xc5, 0xad, 0xdc, 0x49, 0x96, 0xee, 0x44, 0x64, 0x74, 0x0c, 0xc5, 0x1b, 0xea,
+	0xfb, 0x22, 0x1a, 0x16, 0x6f, 0xdf, 0x16, 0xb3, 0xd1, 0xe7, 0x50, 0x90, 0x94, 0xc4, 0xb5, 0xf8,
+	0xb6, 0x4d, 0x9a, 0xda, 0xfc, 0x23, 0x0f, 0xb5, 0x74, 0xc4, 0x5d, 0xa8, 0x93, 0x08, 0x48, 0x86,
+	0x47, 0xd4, 0x2c, 0xf7, 0xb3, 0xcd, 0x62, 0x38, 0xf1, 0xe4, 0xa8, 0x91, 0x65, 0x13, 0x7d, 0x07,
+	0x0d, 0x85, 0x99, 0xef, 0x12, 0x29, 0x02, 0x37, 0xf1, 0xe6, 0x19, 0xff, 0xb1, 0xa2, 0x87, 0x99,
+	0xe2, 0x58, 0x97, 0xf9, 0xb3, 0x9c, 0xb3, 0xad, 0xd6, 0x96, 0xc4, 0x39, 0x20, 0x49, 0x49, 0xd6,
+	0xf3, 0x3f, 0x92, 0x7d, 0x96, 0x73, 0x6c, 0x99, 0xcd, 0xd2, 0x2b, 0xb8, 0x37, 0x5b, 0xe3, 0x30,
+	0xea, 0xc5, 0x8f, 0xd2, 0x0e, 0xfb, 0x6b, 0x3c, 0x6e, 0xcd, 0xb2, 0x2e, 0xbb, 0xf6, 0xe2, 0x1a,
+	0x23, 0x6f, 0xcd, 0x9f, 0x0b, 0x50, 0x8d, 0x9b, 0xe0, 0x95, 0xae, 0xde, 0x77, 0x9d, 0x48, 0x7b,
+	0x00, 0x23, 0x3a, 0x31, 0xbd, 0x38, 0x6f, 0xdd, 0x4a, 0x8c, 0xf4, 0x88, 0x1e, 0x58, 0xc1, 0x80,
+	0x29, 0x77, 0x82, 0x03, 0x73, 0x23, 0x15, 0xa7, 0xa4, 0xed, 0x17, 0x38, 0x40, 0x8f, 0xa0, 0x8e,
+	0x69, 0xe8, 0x52, 0xee, 0xc9, 0x9b, 0xc0, 0x9c, 0xaa, 0x15, 0x96, 0x9d, 0x1a, 0xa6, 0xe1, 0xe9,
+	0x1c, 0xfc, 0x0f, 0x7e, 0x1e, 0xa9, 0x7f, 0x56, 0xf1, 0x6f, 0xff, 0x59, 0xa5, 0xd4, 0x3f, 0x6b,
+	0xb5, 0xf0, 0xca, 0xff, 0xba, 0xf0, 0xba, 0xd9, 0x5b, 0x6f, 0x54, 0x4c, 0x0e, 0xd7, 0xfb, 0x88,
+	0x1b, 0x21, 0xf1, 0x11, 0x99, 0xcd, 0x5f, 0x2c, 0xa8, 0x2d, 0xe7, 0xe9, 0xff, 0x9f, 0xa0, 0x9d,
+	0xc5, 0x04, 0x35, 0x73, 0x2d, 0x6c, 0x14, 0xcd, 0x04, 0xdd, 0x5d, 0x3b, 0x41, 0x4d, 0x50, 0xf3,
+	0xe9, 0x19, 0x85, 0xd8, 0xfa, 0x0a, 0x2a, 0xf3, 0x62, 0x41, 0x55, 0x28, 0x7f, 0x73, 0x71, 0x79,
+	0xe5, 0x9c, 0x76, 0x5e, 0xd8, 0x39, 0x54, 0x07, 0x38, 0x79, 0xd9, 0x3f, 0x8f, 0x6d, 0x0b, 0x6d,
+	0x41, 0xad, 0xdb, 0x3b, 0xe9, 0x39, 0xa7, 0xcf, 0xaf, 0x7a, 0x2f, 0xcf, 0x3b, 0x5f, 0xdb, 0xf9,
+	0xd6, 0x53, 0xb0, 0xb3, 0xf9, 0x44, 0x25, 0x28, 0xf4, 0x1d, 0xc7, 0xce, 0x21, 0x04, 0xf5, 0x4b,
+	0x25, 0x99, 0xa7, 0x2e, 0xe2, 0x0c, 0xda, 0x16, 0x02, 0x28, 0x9e, 0xdd, 0x0c, 0x24, 0x23, 0x76,
+	0xbe, 0xc5, 0xa1, 0xba, 0xfc, 0x7a, 0x41, 0xdb, 0xb0, 0xb5, 0x6c, 0xbb, 0xe7, 0x82, 0x53, 0x3b,
+	0x87, 0xee, 0xc1, 0xdd, 0x34, 0xdc, 0xb1, 0x2d, 0x74, 0x1f, 0x76, 0x52, 0x60, 0x97, 0x86, 0xea,
+	0x74, 0x38, 0x14, 0x52, 0xd9, 0xf9, 0x15, 0x47, 0x9d, 0xa9, 0x12, 0x76, 0xa1, 0xf5, 0x6c, 0x3e,
+	0xb1, 0xe2, 0x48, 0xab, 0x50, 0x4e, 0xe6, 0x87, 0x9d, 0x43, 0x35, 0xa8, 0xf4, 0xe7, 0xa6, 0xa5,
+	0x65, 0x38, 0x94, 0xd8, 0x79, 0x54, 0x86, 0x3b, 0xba, 0x75, 0xed, 0x42, 0xeb, 0x27, 0x0b, 0x1e,
+	0xdc, 0xf6, 0x92, 0x40, 0x8f, 0xe0, 0xe3, 0xdb, 0xbe, 0x27, 0x8a, 0x0e, 0xe0, 0x93, 0x5b, 0x69,
+	0x9d, 0x30, 0x9c, 0x4a, 0x4a, 0x6c, 0x0b, 0x7d, 0x0a, 0x8f, 0x6f, 0x65, 0x2e, 0xcb, 0xee, 0x1e,
+	0x7f, 0xfb, 0x64, 0xc4, 0xd4, 0x78, 0x3a, 0x38, 0xf4, 0xc4, 0xa4, 0x2d, 0x02, 0xca, 0x3d, 0x21,
+	0x49, 0x3b, 0x7a, 0xdf, 0x7e, 0x16, 0xbf, 0x6f, 0x5f, 0x1f, 0xb5, 0x47, 0x22, 0xf5, 0xca, 0x1d,
+	0x14, 0xcd, 0xa7, 0x2f, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x92, 0xf8, 0xb6, 0xb7, 0x0a, 0x0b,
+	0x00, 0x00,
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..d4b9266
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+  - 1.4.x
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..6483ba2
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,52 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+        return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+        return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+        Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+        // handle specifically
+default:
+        // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+  # some helpful output for debugging builds
+  - go version
+  - go env
+  # pre-installed MinGW at C:\MinGW is 32bit only
+  # but MSYS2 at C:\msys64 has mingw64
+  - set PATH=C:\msys64\mingw64\bin;%PATH%
+  - gcc --version
+  - g++ --version
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - set PATH=C:\gopath\bin;%PATH%
+  - go test -v ./...
+
+#artifacts:
+#  - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..7421f32
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,282 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+//     if err != nil {
+//             return err
+//     }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+//     _, err := ioutil.ReadAll(r)
+//     if err != nil {
+//             return errors.Wrap(err, "read failed")
+//     }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+//     type causer interface {
+//             Cause() error
+//     }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+//     switch err := errors.Cause(err).(type) {
+//     case *MyError:
+//             // handle specifically
+//     default:
+//             // unknown error
+//     }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+//     %s    print the error. If the error has a Cause it will be
+//           printed recursively.
+//     %v    see %s
+//     %+v   extended format. Each Frame of the error's StackTrace will
+//           be printed in detail.
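+//
+// For example, printing a wrapped error with %+v also prints the stack trace
+// recorded by Wrap (illustrative):
+//
+//     fmt.Printf("%+v", errors.Wrap(err, "read failed"))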
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+//     type stackTracer interface {
+//             StackTrace() errors.StackTrace
+//     }
+//
+// The returned errors.StackTrace type is defined as
+//
+//     type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+//     if err, ok := err.(stackTracer); ok {
+//             for _, f := range err.StackTrace() {
+//                     fmt.Printf("%+s:%d", f)
+//             }
+//     }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+	return &fundamental{
+		msg:   message,
+		stack: callers(),
+	}
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+	return &fundamental{
+		msg:   fmt.Sprintf(format, args...),
+		stack: callers(),
+	}
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+	msg string
+	*stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			io.WriteString(s, f.msg)
+			f.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, f.msg)
+	case 'q':
+		fmt.Fprintf(s, "%q", f.msg)
+	}
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+type withStack struct {
+	error
+	*stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v", w.Cause())
+			w.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	case 'q':
+		fmt.Fprintf(s, "%q", w.Error())
+	}
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   message,
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   message,
+	}
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+}
+
+type withMessage struct {
+	cause error
+	msg   string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error  { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			io.WriteString(s, w.msg)
+			return
+		}
+		fallthrough
+	case 's', 'q':
+		io.WriteString(s, w.Error())
+	}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type causer interface {
+//            Cause() error
+//     }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..2874a04
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,147 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//    %s    source file
+//    %d    source line
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+s   function name and path of source file relative to the compile time
+//          GOPATH separated by \n\t (<funcname>\n\t<path>)
+//    %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			pc := f.pc()
+			fn := runtime.FuncForPC(pc)
+			if fn == nil {
+				io.WriteString(s, "unknown")
+			} else {
+				file, _ := fn.FileLine(pc)
+				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+			}
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		fmt.Fprintf(s, "%d", f.line())
+	case 'n':
+		name := runtime.FuncForPC(f.pc()).Name()
+		io.WriteString(s, funcname(name))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+//    %s	lists source files for each Frame in the stack
+//    %v	lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+v   Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				fmt.Fprintf(s, "\n%+v", f)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			fmt.Fprintf(s, "%v", []Frame(st))
+		}
+	case 's':
+		fmt.Fprintf(s, "%s", []Frame(st))
+	}
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case st.Flag('+'):
+			for _, pc := range *s {
+				f := Frame(pc)
+				fmt.Fprintf(st, "\n%+v", f)
+			}
+		}
+	}
+}
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..6b7d7d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..848938a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+  depth: 1
+env:
+  - GO111MODULE=on
+  - GO111MODULE=off
+go: [ 1.11.x, 1.12.x ]
+os: [ linux, osx ]
+matrix:
+  exclude:
+    - go: 1.12.x
+      env: GO111MODULE=off
+    - go: 1.11.x
+      os: osx
+install:
+  - ./travis/install.sh
+  - if [[ "$GO111MODULE" ==  "on" ]]; then go mod download; fi
+  - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
+script:
+  - ./travis/cross_build.sh
+  - export GOMAXPROCS=4
+  - export GORACE=halt_on_error=1
+  - go test -race -v ./...
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..51a7ab0
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,200 @@
+# 1.4.2
+  * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+  * Enhance TextFormatter to not print caller information when they are empty (#944)
+  * Remove dependency on golang.org/x/crypto (#932, #943) 
+
+Fixes:
+  * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+  * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+  * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+  * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+  * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+  * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+  * Fix infinite recursion on unknown `Level.String()` (#907)
+  * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+  * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+  * Building prometheus node_exporter on AIX (#840)
+  * Race condition in TextFormatter (#468)
+  * Travis CI import path (#868)
+  * Remove coloured output on Windows (#862)
+  * Pointer to func as field in JSONFormatter (#870)
+  * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of message in text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go module
+  * an indent configuration for the json formatter
+  * output colour support for windows
+  * the field sort function is now configurable for text formatter
+  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
+
+# 1.0.6
+
+This new release introduces:
+  * a new API, WithTime, which makes it easy to force the time of the log entry,
+    which is mostly useful for logger wrappers
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+  * a new configuration field of the JSON formatter in order to put all the fields
+    in a nested dictionary
+  * a new SetOutput method in the Logger
+  * a new configuration of the text formatter to configure the name of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..a4796eb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,495 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case variant was chosen.
+Everything using `logrus` will need to use the lower-case import path:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+	log.SetFormatter(&log.TextFormatter{
+		DisableColors: true,
+		FullTimestamp: true,
+	})
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7.  You can validate this in your
+environment via benchmarks: 
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case--and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr
+  // Can be any io.Writer, see below for File example
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+  "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stdout
+
+  // You could set this to any `io.Writer` such as a file
+  // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+  // if err == nil {
+  //  log.Out = file
+  // } else {
+  //  log.Info("Failed to log to file, using default stderr")
+  // }
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events (see the example after this list):
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
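+For example (an illustrative sketch using the package-level logger imported as
+`log` above; the output format matches the text formatter shown earlier):
+
+```go
+// Only the user field is passed explicitly; time, msg and level are added
+// automatically by Logrus.
+log.WithField("event", "subscribe").Info("Failed to send event.")
+// time="2015-03-26T01:27:38-04:00" level=info msg="Failed to send event." event=subscribe
+```
+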
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can initialize a logger's level, hooks and formatter from a config file, and the logger will be generated with a different config for each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output anything):
+
+```go
+import(
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+  "testing"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Helloerror")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
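+
+A minimal sketch of the second situation (illustrative; `SetNoLock` is the
+method mentioned above and the file name is arbitrary):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/sirupsen/logrus"
+)
+
+func main() {
+	logger := logrus.New()
+
+	// logger.Out is an O_APPEND file and no hooks are registered, so the
+	// per-entry mutex can be skipped.
+	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+	if err == nil {
+		logger.Out = f
+		logger.SetNoLock()
+	}
+
+	logger.Info("lock-free logging to an append-only file")
+}
+```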
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+	handlers = append([]func(){handler}, handlers...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"

+platform: x64

+clone_folder: c:\gopath\src\github.com\sirupsen\logrus

+environment:  

+  GOPATH: c:\gopath

+branches:  

+  only:

+    - master

+install:  

+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%

+  - go version

+build_script:  

+  - go get -t

+  - go test

diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..63e2558
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,407 @@
+package logrus
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+	Level Level
+
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to entry
+	Buffer *bytes.Buffer
+
+	// Contains the context set by the user. Useful for hook processing etc.
+	Context context.Context
+
+	// err may contain a field formatting error
+	err string
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, plus one optional.  Give a little extra room.
+		Data: make(Fields, 6),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	fieldErr := entry.err
+	for k, v := range fields {
+		isErrField := false
+		if t := reflect.TypeOf(v); t != nil {
+			switch t.Kind() {
+			case reflect.Func:
+				isErrField = true
+			case reflect.Ptr:
+				isErrField = t.Elem().Kind() == reflect.Func
+			}
+		}
+		if isErrField {
+			tmp := fmt.Sprintf("can not add field %q", k)
+			if fieldErr != "" {
+				fieldErr = entry.err + ", " + tmp
+			} else {
+				fieldErr = tmp
+			}
+		} else {
+			data[k] = v
+		}
+	}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		pcs := make([]uintptr, 2)
+		_ = runtime.Callers(0, pcs)
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious, the number of frames may vary
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
+		entry.Time = time.Now()
+	}
+
+	entry.Level = level
+	entry.Message = msg
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
+	}
+
+	entry.fireHooks()
+
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+
+	entry.write()
+
+	entry.Buffer = nil
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.log(level, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	entry.Log(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	entry.Log(PanicLevel, args...)
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.Log(level, fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	entry.Logf(FatalLevel, format, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.Log(level, entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+	entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	entry.Logln(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	entry.Logln(PanicLevel, args...)
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..62fc2f2
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,225 @@
+package logrus
+
+import (
+	"context"
+	"io"
+	"time"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+	return std.WithContext(ctx)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
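A short usage sketch of the package-level helpers above, which all delegate to the std logger; the field names and error are illustrative:

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	// Configure the shared standard logger once, typically at startup.
	logrus.SetLevel(logrus.DebugLevel)

	// WithFields returns an *Entry; nothing is written until a level
	// method such as Info is called on it.
	logrus.WithFields(logrus.Fields{
		"component": "vendor-sync",
		"attempt":   1,
	}).Info("starting")

	// WithError stores the error under ErrorKey.
	logrus.WithError(errors.New("upstream unreachable")).Warn("retrying")
}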
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg`, `func` and `level`
+// fields when dumping an entry. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
+	}
+
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
+	}
+
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
+	}
+}
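A small illustration of the clash handling in prefixFieldClashes: a user field that collides with a default key is re-keyed under a fields. prefix rather than dropped. The output shown is indicative, assuming the JSON formatter:

package main

import "github.com/sirupsen/logrus"

func main() {
	logrus.SetFormatter(&logrus.JSONFormatter{})

	// "level" collides with the default level key, so it is emitted as
	// "fields.level" instead of silently overwriting the entry's level.
	logrus.WithField("level", 1).Info("hello")
	// => {"fields.level":1,"level":"info","msg":"hello","time":"..."}
}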
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 0000000..12fdf98
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,10 @@
+module github.com/sirupsen/logrus
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.1
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/testify v1.2.2
+	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 0000000..596c318
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,16 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
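A minimal, hypothetical Hook implementation against the interface above; ErrorCounterHook is an illustrative name, not part of the library:

package main

import (
	"sync/atomic"

	"github.com/sirupsen/logrus"
)

// ErrorCounterHook counts entries logged at error level or above.
type ErrorCounterHook struct {
	count int64
}

// Levels reports the levels this hook fires on.
func (h *ErrorCounterHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire runs synchronously for each matching entry, so it should be cheap
// or offload work itself.
func (h *ErrorCounterHook) Fire(e *logrus.Entry) error {
	atomic.AddInt64(&h.count, 1)
	return nil
}

func main() {
	logrus.AddHook(&ErrorCounterHook{})
	logrus.Error("something went wrong")
}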
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..098a21a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,121 @@
+package logrus
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"runtime"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime:  "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
+	//    },
+	// }
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the json data when ReportCaller is
+	// activated. If either of the returned values is the empty string, the
+	// corresponding key will be removed from the JSON fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+4)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		funcVal := entry.Caller.Function
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+		if funcVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+		}
+		if fileVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+		}
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+	}
+
+	return b.Bytes(), nil
+}
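A sketch of configuring the JSONFormatter options defined above; the key names and DataKey are illustrative choices:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&logrus.JSONFormatter{
		// Nest user-supplied fields under "data" instead of mixing them
		// with the default keys.
		DataKey: "data",
		// Rename the default keys.
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
		// Indent the JSON for human consumption.
		PrettyPrint: true,
	})

	logger.WithField("request_id", "abc123").Info("request handled")
}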
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..c0c0b1e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,351 @@
+package logrus
+
+import (
+	"context"
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Error, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Print(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages up to 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than or equal to the level param.
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
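Per the New() doc comment above, a Logger can also be constructed as a literal; a sketch (the JSON formatter and debug level are arbitrary choices):

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// Equivalent to logrus.New() except for the formatter and level.
	log := &logrus.Logger{
		Out:       os.Stderr,
		Formatter: new(logrus.JSONFormatter),
		Hooks:     make(logrus.LevelHooks),
		Level:     logrus.DebugLevel,
		ExitFunc:  os.Exit,
	}

	log.WithField("pid", os.Getpid()).Debug("logger constructed by hand")
}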
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..8644761
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	if b, err := level.MarshalText(); err == nil {
+		return string(b)
+	} else {
+		return "unknown"
+	}
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+	switch level {
+	case TraceLevel:
+		return []byte("trace"), nil
+	case DebugLevel:
+		return []byte("debug"), nil
+	case InfoLevel:
+		return []byte("info"), nil
+	case WarnLevel:
+		return []byte("warning"), nil
+	case ErrorLevel:
+		return []byte("error"), nil
+	case FatalLevel:
+		return []byte("fatal"), nil
+	case PanicLevel:
+		return []byte("panic"), nil
+	}
+
+	return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+	TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than the Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it
+// accepts both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
+}
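A sketch of driving the level from configuration with ParseLevel; the LOG_LEVEL environment variable is an assumption of this example, not something logrus reads itself:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// Fall back to info when the variable is unset or invalid.
	level, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		level = logrus.InfoLevel
	}
	logrus.SetLevel(level)

	// Only emitted when LOG_LEVEL is debug or trace.
	logrus.Debugf("log level set to %s", level)
}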
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..3c4f43f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+	"io"
+	"os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return isTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// isTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
new file mode 100644
index 0000000..355dc96
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -0,0 +1,13 @@
+// +build linux aix
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
+
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..572889d
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,34 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
+}
+
+func checkIfTerminal(w io.Writer) bool {
+	var ret bool
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		ret = (err == nil)
+	default:
+		ret = false
+	}
+	if ret {
+		initTerminal(w)
+	}
+	return ret
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..e01587c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,295 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	red    = 31
+	yellow = 33
+	blue   = 36
+	gray   = 37
+)
+
+var baseTimestamp time.Time
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// The keys sorting function; when uninitialized, sort.Strings is used.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's out is to a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the data when ReportCaller is
+	// activated. If either of the returned values is the empty string, the
+	// corresponding key will be removed from the fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	terminalInitOnce sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
+		keys = append(keys, k)
+	}
+
+	var funcVal, fileVal string
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		} else {
+			funcVal = entry.Caller.Function
+			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		}
+
+		if funcVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+		}
+		if fileVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+		}
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	f.terminalInitOnce.Do(func() { f.init(entry) })
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+	if f.isColored() {
+		f.printColored(b, entry, keys, data, timestampFormat)
+	} else {
+
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = funcVal
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fileVal
+			default:
+				value = data[key]
+			}
+			f.appendKeyValue(b, key, value)
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel, TraceLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}
+
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+	if entry.HasCaller() {
+		funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+
+		if fileVal == "" {
+			caller = funcVal
+		} else if funcVal == "" {
+			caller = fileVal
+		} else {
+			caller = fileVal + " " + funcVal
+		}
+	}
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+	}
+	for _, k := range keys {
+		v := data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+	if f.QuoteEmptyFields && len(text) == 0 {
+		return true
+	}
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
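A sketch of the TextFormatter knobs above, e.g. for logs shipped to a collector where colors and relative timestamps are unhelpful:

package main

import "github.com/sirupsen/logrus"

func main() {
	logrus.SetFormatter(&logrus.TextFormatter{
		DisableColors:    true, // plain key=value output even on a TTY
		FullTimestamp:    true, // absolute timestamps instead of seconds since start
		TimestampFormat:  "2006-01-02 15:04:05",
		QuoteEmptyFields: true, // render empty string values as ""
	})

	logrus.WithField("path", "/healthz").Info("request served")
}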
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..9e1f751
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,64 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	go entry.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
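A sketch of the Writer/WriterLevel plumbing above, routing the stdlib log package through logrus. Note the pipe is drained by a background goroutine, so very short-lived programs may exit before the last line is flushed:

package main

import (
	"log"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Everything written to the stdlib logger is re-logged line by line
	// at warn level.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	log.SetOutput(w)
	log.SetFlags(0) // avoid double timestamps
	log.Println("legacy code path still using the stdlib logger")

	// Give the background scanner a moment to flush before exiting.
	time.Sleep(100 * time.Millisecond)
}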