Finish porting to ONF infrastructure
- change imports
- add vendor folder
- add licensing
Change-Id: If2e7ed27d603668b848ae58c135e94a8db13a9e2
diff --git a/vendor/github.com/aead/cmac/.gitignore b/vendor/github.com/aead/cmac/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/aead/cmac/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/aead/cmac/LICENSE b/vendor/github.com/aead/cmac/LICENSE
new file mode 100644
index 0000000..b6a9210
--- /dev/null
+++ b/vendor/github.com/aead/cmac/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Andreas Auernhammer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/aead/cmac/README.md b/vendor/github.com/aead/cmac/README.md
new file mode 100644
index 0000000..5c1dda4
--- /dev/null
+++ b/vendor/github.com/aead/cmac/README.md
@@ -0,0 +1,12 @@
+[![Godoc Reference](https://godoc.org/github.com/aead/cmac?status.svg)](https://godoc.org/github.com/aead/cmac)
+
+## The CMAC/OMAC1 message authentication code
+
+The CMAC message authentication code is specified (with AES) in [RFC 4493](https://tools.ietf.org/html/rfc4493 "RFC 4493")
+and [RFC 4494](https://tools.ietf.org/html/rfc4494 "RFC 4494").
+CMAC is only specified with the AES.
+
+This implementation supports block ciphers with a block size of 64, 128, 256, 512 or 1024 bit.
+
+### Installation
+Install in your GOPATH: `go get -u github.com/aead/cmac`
diff --git a/vendor/github.com/aead/cmac/aes/aes-cmac.go b/vendor/github.com/aead/cmac/aes/aes-cmac.go
new file mode 100644
index 0000000..5ca3c6c
--- /dev/null
+++ b/vendor/github.com/aead/cmac/aes/aes-cmac.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package aes implements the CMAC MAC with the AES.
+// AES-CMAC is specified in RFC 4493 and RFC 4494.
+package aes // import "github.com/aead/cmac/aes"
+
+import (
+ aesCipher "crypto/aes"
+ "hash"
+
+ "github.com/aead/cmac"
+)
+
+// Sum computes the AES-CMAC checksum with the given tagsize of msg using the cipher.Block.
+func Sum(msg, key []byte, tagsize int) ([]byte, error) {
+ c, err := aesCipher.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ return cmac.Sum(msg, c, tagsize)
+}
+
+// Verify computes the AES-CMAC checksum with the given tagsize of msg and compares
+// it with the given mac. This functions returns true if and only if the given mac
+// is equal to the computed one.
+func Verify(mac, msg, key []byte, tagsize int) bool {
+ c, err := aesCipher.NewCipher(key)
+ if err != nil {
+ return false
+ }
+ return cmac.Verify(mac, msg, c, tagsize)
+}
+
+// New returns a hash.Hash computing the AES-CMAC checksum.
+func New(key []byte) (hash.Hash, error) {
+ return NewWithTagSize(key, aesCipher.BlockSize)
+}
+
+// NewWithTagSize returns a hash.Hash computing the AES-CMAC checksum with the
+// given tag size. The tag size must be between 1 and the cipher's block size.
+func NewWithTagSize(key []byte, tagsize int) (hash.Hash, error) {
+ c, err := aesCipher.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ return cmac.NewWithTagSize(c, tagsize)
+}
diff --git a/vendor/github.com/aead/cmac/cmac.go b/vendor/github.com/aead/cmac/cmac.go
new file mode 100644
index 0000000..1e90314
--- /dev/null
+++ b/vendor/github.com/aead/cmac/cmac.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package cmac implements the fast CMAC MAC based on
+// a block cipher. This mode of operation fixes security
+// deficiencies of CBC-MAC (CBC-MAC is secure only for
+// fixed-length messages). CMAC is equal to OMAC1.
+// This implementations supports block ciphers with a
+// block size of:
+// - 64 bit
+// - 128 bit
+// - 256 bit
+// - 512 bit
+// - 1024 bit
+// Common ciphers like AES, Serpent etc. operate on 128 bit
+// blocks. 256, 512 and 1024 are supported for the Threefish
+// tweakable block cipher. Ciphers with 64 bit blocks are
+// supported, but not recommended.
+// CMAC (with AES) is specified in RFC 4493 and RFC 4494.
+package cmac // import "github.com/aead/cmac"
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "errors"
+ "hash"
+)
+
+const (
+ // minimal irreducible polynomial for blocksize
+ p64 = 0x1b // for 64 bit block ciphers
+ p128 = 0x87 // for 128 bit block ciphers (like AES)
+ p256 = 0x425 // special for large block ciphers (Threefish)
+ p512 = 0x125 // special for large block ciphers (Threefish)
+ p1024 = 0x80043 // special for large block ciphers (Threefish)
+)
+
+var (
+ errUnsupportedCipher = errors.New("cipher block size not supported")
+ errInvalidTagSize = errors.New("tags size must between 1 and the cipher's block size")
+)
+
+// Sum computes the CMAC checksum with the given tagsize of msg using the cipher.Block.
+func Sum(msg []byte, c cipher.Block, tagsize int) ([]byte, error) {
+ h, err := NewWithTagSize(c, tagsize)
+ if err != nil {
+ return nil, err
+ }
+ h.Write(msg)
+ return h.Sum(nil), nil
+}
+
+// Verify computes the CMAC checksum with the given tagsize of msg and compares
+// it with the given mac. This functions returns true if and only if the given mac
+// is equal to the computed one.
+func Verify(mac, msg []byte, c cipher.Block, tagsize int) bool {
+ sum, err := Sum(msg, c, tagsize)
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(mac, sum) == 1
+}
+
+// New returns a hash.Hash computing the CMAC checksum.
+func New(c cipher.Block) (hash.Hash, error) {
+ return NewWithTagSize(c, c.BlockSize())
+}
+
+// NewWithTagSize returns a hash.Hash computing the CMAC checksum with the
+// given tag size. The tag size must be between 1 and the cipher's block size.
+func NewWithTagSize(c cipher.Block, tagsize int) (hash.Hash, error) {
+ blocksize := c.BlockSize()
+
+ if tagsize <= 0 || tagsize > blocksize {
+ return nil, errInvalidTagSize
+ }
+
+ var p int
+ switch blocksize {
+ default:
+ return nil, errUnsupportedCipher
+ case 8:
+ p = p64
+ case 16:
+ p = p128
+ case 32:
+ p = p256
+ case 64:
+ p = p512
+ case 128:
+ p = p1024
+ }
+
+ m := &macFunc{
+ cipher: c,
+ k0: make([]byte, blocksize),
+ k1: make([]byte, blocksize),
+ buf: make([]byte, blocksize),
+ }
+ m.tagsize = tagsize
+ c.Encrypt(m.k0, m.k0)
+
+ v := shift(m.k0, m.k0)
+ m.k0[blocksize-1] ^= byte(subtle.ConstantTimeSelect(v, p, 0))
+
+ v = shift(m.k1, m.k0)
+ m.k1[blocksize-1] ^= byte(subtle.ConstantTimeSelect(v, p, 0))
+
+ return m, nil
+}
+
+// The CMAC message auth. function
+type macFunc struct {
+ cipher cipher.Block
+ k0, k1 []byte
+ buf []byte
+ off int
+ tagsize int
+}
+
+func (h *macFunc) Size() int { return h.cipher.BlockSize() }
+
+func (h *macFunc) BlockSize() int { return h.cipher.BlockSize() }
+
+func (h *macFunc) Reset() {
+ for i := range h.buf {
+ h.buf[i] = 0
+ }
+ h.off = 0
+}
+
+func (h *macFunc) Write(msg []byte) (int, error) {
+ bs := h.BlockSize()
+ n := len(msg)
+
+ if h.off > 0 {
+ dif := bs - h.off
+ if n > dif {
+ xor(h.buf[h.off:], msg[:dif])
+ msg = msg[dif:]
+ h.cipher.Encrypt(h.buf, h.buf)
+ h.off = 0
+ } else {
+ xor(h.buf[h.off:], msg)
+ h.off += n
+ return n, nil
+ }
+ }
+
+ if length := len(msg); length > bs {
+ nn := length & (^(bs - 1))
+ if length == nn {
+ nn -= bs
+ }
+ for i := 0; i < nn; i += bs {
+ xor(h.buf, msg[i:i+bs])
+ h.cipher.Encrypt(h.buf, h.buf)
+ }
+ msg = msg[nn:]
+ }
+
+ if length := len(msg); length > 0 {
+ xor(h.buf[h.off:], msg)
+ h.off += length
+ }
+
+ return n, nil
+}
+
+func (h *macFunc) Sum(b []byte) []byte {
+ blocksize := h.cipher.BlockSize()
+
+ // Don't change the buffer so the
+	// caller can keep writing and summing.
+ hash := make([]byte, blocksize)
+
+ if h.off < blocksize {
+ copy(hash, h.k1)
+ } else {
+ copy(hash, h.k0)
+ }
+
+ xor(hash, h.buf)
+ if h.off < blocksize {
+ hash[h.off] ^= 0x80
+ }
+
+ h.cipher.Encrypt(hash, hash)
+ return append(b, hash[:h.tagsize]...)
+}
+
+func shift(dst, src []byte) int {
+ var b, bit byte
+ for i := len(src) - 1; i >= 0; i-- { // a range would be nice
+ bit = src[i] >> 7
+ dst[i] = src[i]<<1 | b
+ b = bit
+ }
+ return int(b)
+}
diff --git a/vendor/github.com/aead/cmac/xor.go b/vendor/github.com/aead/cmac/xor.go
new file mode 100644
index 0000000..ecc3862
--- /dev/null
+++ b/vendor/github.com/aead/cmac/xor.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// +build !amd64
+
+package cmac
+
+// xor xors the bytes in dst with src and writes the result to dst.
+// The destination is assumed to have enough space.
+func xor(dst, src []byte) {
+ for i, v := range src {
+ dst[i] ^= v
+ }
+}
diff --git a/vendor/github.com/aead/cmac/xor_amd64.go b/vendor/github.com/aead/cmac/xor_amd64.go
new file mode 100644
index 0000000..3326a89
--- /dev/null
+++ b/vendor/github.com/aead/cmac/xor_amd64.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// +build amd64, !gccgo, !appengine
+
+package cmac
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+// xor xors the bytes in dst with src and writes the result to dst.
+// The destination is assumed to have enough space.
+func xor(dst, src []byte) {
+ n := len(src)
+
+ w := n / wordSize
+ if w > 0 {
+ dstPtr := *(*[]uintptr)(unsafe.Pointer(&dst))
+ srcPtr := *(*[]uintptr)(unsafe.Pointer(&src))
+ for i, v := range srcPtr[:w] {
+ dstPtr[i] ^= v
+ }
+ }
+
+ for i := (n & (^(wordSize - 1))); i < n; i++ {
+ dst[i] ^= src[i]
+ }
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into a one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+// mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+	strings []string // either nil or the same length as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by derferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/deckarep/golang-set/.gitignore b/vendor/github.com/deckarep/golang-set/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/deckarep/golang-set/.travis.yml b/vendor/github.com/deckarep/golang-set/.travis.yml
new file mode 100644
index 0000000..c760d24
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - 1.8
+ - 1.9
+ - tip
+
+script:
+ - go test -race ./...
+ - go test -bench=.
+
diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE
new file mode 100644
index 0000000..b5768f8
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/LICENSE
@@ -0,0 +1,22 @@
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md
new file mode 100644
index 0000000..c3b50b2
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/README.md
@@ -0,0 +1,95 @@
+[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set)
+[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set)
+[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set)
+
+## golang-set
+
+
+The missing set collection for the Go language. Until Go has sets built-in...use this.
+
+Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
+You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository
+and carry-on and to the rest that find this useful please contribute in helping me make it better by:
+
+* Helping to make more idiomatic improvements to the code.
+* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
+* Helping to make the unit-tests more robust and kick-ass.
+* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
+* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)
+
+I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
+
+*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
+
+## Features (as of 9/22/2014)
+
+* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
+
+## Features (as of 9/15/2014)
+
+* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
+
+## Features (as of 4/22/2014)
+
+* One common interface to both implementations
+* Two set implementations to choose from
+ * a thread-safe implementation designed for concurrent use
+ * a non-thread-safe implementation designed for performance
+* 75 benchmarks for both implementations
+* 35 unit tests for both implementations
+* 14 concurrent tests for the thread-safe implementation
+
+
+
+Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
+however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
+
+## Examples but not exhaustive:
+
+```go
+requiredClasses := mapset.NewSet()
+requiredClasses.Add("Cooking")
+requiredClasses.Add("English")
+requiredClasses.Add("Math")
+requiredClasses.Add("Biology")
+
+scienceSlice := []interface{}{"Biology", "Chemistry"}
+scienceClasses := mapset.NewSetFromSlice(scienceSlice)
+
+electiveClasses := mapset.NewSet()
+electiveClasses.Add("Welding")
+electiveClasses.Add("Music")
+electiveClasses.Add("Automotive")
+
+bonusClasses := mapset.NewSet()
+bonusClasses.Add("Go Programming")
+bonusClasses.Add("Python Programming")
+
+//Show me all the available classes I can take
+allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
+
+
+//Is cooking considered a science class?
+fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+//Show me all classes that are not science classes, since I hate science.
+fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
+
+//Which science classes are also required classes?
+fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+//How many bonus classes do you offer?
+fmt.Println(bonusClasses.Cardinality()) //2
+
+//Do you have the following classes? Welding, Automotive and English?
+fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
+```
+
+Thanks!
+
+-Ralph
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
+[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)
diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go
new file mode 100644
index 0000000..9dfecad
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/iterator.go
@@ -0,0 +1,58 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's
+// elements.
+type Iterator struct {
+ C <-chan interface{}
+ stop chan struct{}
+}
+
+// Stop stops the Iterator, no further elements will be received on C, C will be closed.
+func (i *Iterator) Stop() {
+ // Allows for Stop() to be called multiple times
+ // (close() panics when called on already closed channel)
+ defer func() {
+ recover()
+ }()
+
+ close(i.stop)
+
+ // Exhaust any remaining elements.
+ for range i.C {
+ }
+}
+
+// newIterator returns a new Iterator instance together with its item and stop channels.
+func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) {
+ itemChan := make(chan interface{})
+ stopChan := make(chan struct{})
+ return &Iterator{
+ C: itemChan,
+ stop: stopChan,
+ }, itemChan, stopChan
+}
diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go
new file mode 100644
index 0000000..29eb2e5
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/set.go
@@ -0,0 +1,217 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+// Package mapset implements a simple and generic set collection.
+// Items stored within it are unordered and unique. It supports
+// typical set operations: membership testing, intersection, union,
+// difference, symmetric difference and cloning.
+//
+// Package mapset provides two implementations of the Set
+// interface. The default implementation is safe for concurrent
+// access, but a non-thread-safe implementation is also provided for
+// programs that can benefit from the slight speed improvement and
+// that can enforce mutual exclusion through other means.
+package mapset
+
+// Set is the primary interface provided by the mapset package. It
+// represents an unordered set of data and a large number of
+// operations that can be applied to that set.
+type Set interface {
+ // Adds an element to the set. Returns whether
+ // the item was added.
+ Add(i interface{}) bool
+
+ // Returns the number of elements in the set.
+ Cardinality() int
+
+ // Removes all elements from the set, leaving
+ // the empty set.
+ Clear()
+
+ // Returns a clone of the set using the same
+ // implementation, duplicating all keys.
+ Clone() Set
+
+ // Returns whether the given items
+ // are all in the set.
+ Contains(i ...interface{}) bool
+
+ // Returns the difference between this set
+ // and other. The returned set will contain
+ // all elements of this set that are not also
+ // elements of other.
+ //
+ // Note that the argument to Difference
+ // must be of the same type as the receiver
+ // of the method. Otherwise, Difference will
+ // panic.
+ Difference(other Set) Set
+
+ // Determines if two sets are equal to each
+ // other. If they have the same cardinality
+ // and contain the same elements, they are
+ // considered equal. The order in which
+ // the elements were added is irrelevant.
+ //
+ // Note that the argument to Equal must be
+ // of the same type as the receiver of the
+ // method. Otherwise, Equal will panic.
+ Equal(other Set) bool
+
+ // Returns a new set containing only the elements
+ // that exist only in both sets.
+ //
+ // Note that the argument to Intersect
+ // must be of the same type as the receiver
+ // of the method. Otherwise, Intersect will
+ // panic.
+ Intersect(other Set) Set
+
+ // Determines if every element in this set is in
+ // the other set but the two sets are not equal.
+ //
+ // Note that the argument to IsProperSubset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsProperSubset
+ // will panic.
+ IsProperSubset(other Set) bool
+
+ // Determines if every element in the other set
+ // is in this set but the two sets are not
+ // equal.
+ //
+ // Note that the argument to IsSuperset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsSuperset will
+ // panic.
+ IsProperSuperset(other Set) bool
+
+ // Determines if every element in this set is in
+ // the other set.
+ //
+ // Note that the argument to IsSubset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsSubset will
+ // panic.
+ IsSubset(other Set) bool
+
+ // Determines if every element in the other set
+ // is in this set.
+ //
+ // Note that the argument to IsSuperset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsSuperset will
+ // panic.
+ IsSuperset(other Set) bool
+
+ // Iterates over elements and executes the passed func against each element.
+ // If passed func returns true, stop iteration at the time.
+ Each(func(interface{}) bool)
+
+ // Returns a channel of elements that you can
+ // range over.
+ Iter() <-chan interface{}
+
+ // Returns an Iterator object that you can
+ // use to range over the set.
+ Iterator() *Iterator
+
+ // Remove a single element from the set.
+ Remove(i interface{})
+
+ // Provides a convenient string representation
+ // of the current state of the set.
+ String() string
+
+ // Returns a new set with all elements which are
+ // in either this set or the other set but not in both.
+ //
+ // Note that the argument to SymmetricDifference
+ // must be of the same type as the receiver
+ // of the method. Otherwise, SymmetricDifference
+ // will panic.
+ SymmetricDifference(other Set) Set
+
+ // Returns a new set with all elements in both sets.
+	//
+	// Note that the argument to Union
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Union will
+	// panic.
+ Union(other Set) Set
+
+ // Pop removes and returns an arbitrary item from the set.
+ Pop() interface{}
+
+ // Returns all subsets of a given set (Power Set).
+ PowerSet() Set
+
+ // Returns the Cartesian Product of two sets.
+ CartesianProduct(other Set) Set
+
+ // Returns the members of the set as a slice.
+ ToSlice() []interface{}
+}
+
+// NewSet creates and returns a reference to an empty set. Operations
+// on the resulting set are thread-safe.
+func NewSet(s ...interface{}) Set {
+ set := newThreadSafeSet()
+ for _, item := range s {
+ set.Add(item)
+ }
+ return &set
+}
+
+// NewSetWith creates and returns a new set with the given elements.
+// Operations on the resulting set are thread-safe.
+func NewSetWith(elts ...interface{}) Set {
+ return NewSetFromSlice(elts)
+}
+
+// NewSetFromSlice creates and returns a reference to a set from an
+// existing slice. Operations on the resulting set are thread-safe.
+func NewSetFromSlice(s []interface{}) Set {
+ a := NewSet(s...)
+ return a
+}
+
+// NewThreadUnsafeSet creates and returns a reference to an empty set.
+// Operations on the resulting set are not thread-safe.
+func NewThreadUnsafeSet() Set {
+ set := newThreadUnsafeSet()
+ return &set
+}
+
+// NewThreadUnsafeSetFromSlice creates and returns a reference to a
+// set from an existing slice. Operations on the resulting set are
+// not thread-safe.
+func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
+ a := NewThreadUnsafeSet()
+ for _, item := range s {
+ a.Add(item)
+ }
+ return a
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go
new file mode 100644
index 0000000..269b4ab
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadsafe.go
@@ -0,0 +1,283 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import "sync"
+
+type threadSafeSet struct {
+ s threadUnsafeSet
+ sync.RWMutex
+}
+
+func newThreadSafeSet() threadSafeSet {
+ return threadSafeSet{s: newThreadUnsafeSet()}
+}
+
+func (set *threadSafeSet) Add(i interface{}) bool {
+ set.Lock()
+ ret := set.s.Add(i)
+ set.Unlock()
+ return ret
+}
+
+func (set *threadSafeSet) Contains(i ...interface{}) bool {
+ set.RLock()
+ ret := set.s.Contains(i...)
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) IsSubset(other Set) bool {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ ret := set.s.IsSubset(&o.s)
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) IsProperSubset(other Set) bool {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ defer set.RUnlock()
+ o.RLock()
+ defer o.RUnlock()
+
+ return set.s.IsProperSubset(&o.s)
+}
+
+func (set *threadSafeSet) IsSuperset(other Set) bool {
+ return other.IsSubset(set)
+}
+
+func (set *threadSafeSet) IsProperSuperset(other Set) bool {
+ return other.IsProperSubset(set)
+}
+
+func (set *threadSafeSet) Union(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeUnion}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Intersect(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeIntersection}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Difference(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeDifference}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) SymmetricDifference(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeDifference}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Clear() {
+ set.Lock()
+ set.s = newThreadUnsafeSet()
+ set.Unlock()
+}
+
+func (set *threadSafeSet) Remove(i interface{}) {
+ set.Lock()
+ delete(set.s, i)
+ set.Unlock()
+}
+
+func (set *threadSafeSet) Cardinality() int {
+ set.RLock()
+ defer set.RUnlock()
+ return len(set.s)
+}
+
+func (set *threadSafeSet) Each(cb func(interface{}) bool) {
+ set.RLock()
+ for elem := range set.s {
+ if cb(elem) {
+ break
+ }
+ }
+ set.RUnlock()
+}
+
+func (set *threadSafeSet) Iter() <-chan interface{} {
+ ch := make(chan interface{})
+ go func() {
+ set.RLock()
+
+ for elem := range set.s {
+ ch <- elem
+ }
+ close(ch)
+ set.RUnlock()
+ }()
+
+ return ch
+}
+
+func (set *threadSafeSet) Iterator() *Iterator {
+ iterator, ch, stopCh := newIterator()
+
+ go func() {
+ set.RLock()
+ L:
+ for elem := range set.s {
+ select {
+ case <-stopCh:
+ break L
+ case ch <- elem:
+ }
+ }
+ close(ch)
+ set.RUnlock()
+ }()
+
+ return iterator
+}
+
+func (set *threadSafeSet) Equal(other Set) bool {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ ret := set.s.Equal(&o.s)
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Clone() Set {
+ set.RLock()
+
+ unsafeClone := set.s.Clone().(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeClone}
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) String() string {
+ set.RLock()
+ ret := set.s.String()
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) PowerSet() Set {
+ set.RLock()
+ unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet)
+ set.RUnlock()
+
+ ret := &threadSafeSet{s: newThreadUnsafeSet()}
+ for subset := range unsafePowerSet.Iter() {
+ unsafeSubset := subset.(*threadUnsafeSet)
+ ret.Add(&threadSafeSet{s: *unsafeSubset})
+ }
+ return ret
+}
+
+func (set *threadSafeSet) Pop() interface{} {
+ set.Lock()
+ defer set.Unlock()
+ return set.s.Pop()
+}
+
+func (set *threadSafeSet) CartesianProduct(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeCartProduct}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) ToSlice() []interface{} {
+ keys := make([]interface{}, 0, set.Cardinality())
+ set.RLock()
+ for elem := range set.s {
+ keys = append(keys, elem)
+ }
+ set.RUnlock()
+ return keys
+}
+
+func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
+ set.RLock()
+ b, err := set.s.MarshalJSON()
+ set.RUnlock()
+
+ return b, err
+}
+
+func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
+ set.RLock()
+ err := set.s.UnmarshalJSON(p)
+ set.RUnlock()
+
+ return err
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go
new file mode 100644
index 0000000..10bdd46
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go
@@ -0,0 +1,337 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+type threadUnsafeSet map[interface{}]struct{}
+
+// An OrderedPair represents a 2-tuple of values.
+type OrderedPair struct {
+ First interface{}
+ Second interface{}
+}
+
+func newThreadUnsafeSet() threadUnsafeSet {
+ return make(threadUnsafeSet)
+}
+
+// Equal says whether two 2-tuples contain the same values in the same order.
+func (pair *OrderedPair) Equal(other OrderedPair) bool {
+ if pair.First == other.First &&
+ pair.Second == other.Second {
+ return true
+ }
+
+ return false
+}
+
+func (set *threadUnsafeSet) Add(i interface{}) bool {
+ _, found := (*set)[i]
+ if found {
+ return false //False if it existed already
+ }
+
+ (*set)[i] = struct{}{}
+ return true
+}
+
+func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
+ for _, val := range i {
+ if _, ok := (*set)[val]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (set *threadUnsafeSet) IsSubset(other Set) bool {
+ _ = other.(*threadUnsafeSet)
+ for elem := range *set {
+ if !other.Contains(elem) {
+ return false
+ }
+ }
+ return true
+}
+
+func (set *threadUnsafeSet) IsProperSubset(other Set) bool {
+ return set.IsSubset(other) && !set.Equal(other)
+}
+
+func (set *threadUnsafeSet) IsSuperset(other Set) bool {
+ return other.IsSubset(set)
+}
+
+func (set *threadUnsafeSet) IsProperSuperset(other Set) bool {
+ return set.IsSuperset(other) && !set.Equal(other)
+}
+
+func (set *threadUnsafeSet) Union(other Set) Set {
+ o := other.(*threadUnsafeSet)
+
+ unionedSet := newThreadUnsafeSet()
+
+ for elem := range *set {
+ unionedSet.Add(elem)
+ }
+ for elem := range *o {
+ unionedSet.Add(elem)
+ }
+ return &unionedSet
+}
+
+func (set *threadUnsafeSet) Intersect(other Set) Set {
+ o := other.(*threadUnsafeSet)
+
+ intersection := newThreadUnsafeSet()
+ // loop over smaller set
+ if set.Cardinality() < other.Cardinality() {
+ for elem := range *set {
+ if other.Contains(elem) {
+ intersection.Add(elem)
+ }
+ }
+ } else {
+ for elem := range *o {
+ if set.Contains(elem) {
+ intersection.Add(elem)
+ }
+ }
+ }
+ return &intersection
+}
+
+func (set *threadUnsafeSet) Difference(other Set) Set {
+ _ = other.(*threadUnsafeSet)
+
+ difference := newThreadUnsafeSet()
+ for elem := range *set {
+ if !other.Contains(elem) {
+ difference.Add(elem)
+ }
+ }
+ return &difference
+}
+
+func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
+ _ = other.(*threadUnsafeSet)
+
+ aDiff := set.Difference(other)
+ bDiff := other.Difference(set)
+ return aDiff.Union(bDiff)
+}
+
+func (set *threadUnsafeSet) Clear() {
+ *set = newThreadUnsafeSet()
+}
+
+func (set *threadUnsafeSet) Remove(i interface{}) {
+ delete(*set, i)
+}
+
+func (set *threadUnsafeSet) Cardinality() int {
+ return len(*set)
+}
+
+func (set *threadUnsafeSet) Each(cb func(interface{}) bool) {
+ for elem := range *set {
+ if cb(elem) {
+ break
+ }
+ }
+}
+
+func (set *threadUnsafeSet) Iter() <-chan interface{} {
+ ch := make(chan interface{})
+ go func() {
+ for elem := range *set {
+ ch <- elem
+ }
+ close(ch)
+ }()
+
+ return ch
+}
+
+func (set *threadUnsafeSet) Iterator() *Iterator {
+ iterator, ch, stopCh := newIterator()
+
+ go func() {
+ L:
+ for elem := range *set {
+ select {
+ case <-stopCh:
+ break L
+ case ch <- elem:
+ }
+ }
+ close(ch)
+ }()
+
+ return iterator
+}
+
+func (set *threadUnsafeSet) Equal(other Set) bool {
+ _ = other.(*threadUnsafeSet)
+
+ if set.Cardinality() != other.Cardinality() {
+ return false
+ }
+ for elem := range *set {
+ if !other.Contains(elem) {
+ return false
+ }
+ }
+ return true
+}
+
+func (set *threadUnsafeSet) Clone() Set {
+ clonedSet := newThreadUnsafeSet()
+ for elem := range *set {
+ clonedSet.Add(elem)
+ }
+ return &clonedSet
+}
+
+func (set *threadUnsafeSet) String() string {
+ items := make([]string, 0, len(*set))
+
+ for elem := range *set {
+ items = append(items, fmt.Sprintf("%v", elem))
+ }
+ return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
+}
+
+// String outputs a 2-tuple in the form "(A, B)".
+func (pair OrderedPair) String() string {
+ return fmt.Sprintf("(%v, %v)", pair.First, pair.Second)
+}
+
+func (set *threadUnsafeSet) Pop() interface{} {
+ for item := range *set {
+ delete(*set, item)
+ return item
+ }
+ return nil
+}
+
+func (set *threadUnsafeSet) PowerSet() Set {
+ powSet := NewThreadUnsafeSet()
+ nullset := newThreadUnsafeSet()
+ powSet.Add(&nullset)
+
+ for es := range *set {
+ u := newThreadUnsafeSet()
+ j := powSet.Iter()
+ for er := range j {
+ p := newThreadUnsafeSet()
+ if reflect.TypeOf(er).Name() == "" {
+ k := er.(*threadUnsafeSet)
+ for ek := range *(k) {
+ p.Add(ek)
+ }
+ } else {
+ p.Add(er)
+ }
+ p.Add(es)
+ u.Add(&p)
+ }
+
+ powSet = powSet.Union(&u)
+ }
+
+ return powSet
+}
+
+func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
+ o := other.(*threadUnsafeSet)
+ cartProduct := NewThreadUnsafeSet()
+
+ for i := range *set {
+ for j := range *o {
+ elem := OrderedPair{First: i, Second: j}
+ cartProduct.Add(elem)
+ }
+ }
+
+ return cartProduct
+}
+
+func (set *threadUnsafeSet) ToSlice() []interface{} {
+ keys := make([]interface{}, 0, set.Cardinality())
+ for elem := range *set {
+ keys = append(keys, elem)
+ }
+
+ return keys
+}
+
+// MarshalJSON creates a JSON array from the set, it marshals all elements
+func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) {
+ items := make([]string, 0, set.Cardinality())
+
+ for elem := range *set {
+ b, err := json.Marshal(elem)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, string(b))
+ }
+
+ return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
+}
+
+// UnmarshalJSON recreates a set from a JSON array, it only decodes
+// primitive types. Numbers are decoded as json.Number.
+func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error {
+ var i []interface{}
+
+ d := json.NewDecoder(bytes.NewReader(b))
+ d.UseNumber()
+ err := d.Decode(&i)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range i {
+ switch t := v.(type) {
+ case []interface{}, map[string]interface{}:
+ continue
+ default:
+ set.Add(t)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore
new file mode 100644
index 0000000..149266f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+#*
+*~
+
+# examples binaries
+examples/synscan/synscan
+examples/pfdump/pfdump
+examples/pcapdump/pcapdump
+examples/httpassembly/httpassembly
+examples/statsassembly/statsassembly
+examples/arpscan/arpscan
+examples/bidirectional/bidirectional
+examples/bytediff/bytediff
+examples/reassemblydump/reassemblydump
+layers/gen
+macs/gen
+pcap/pcap_tester
diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh
new file mode 100644
index 0000000..e341a1c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.gofmt.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+if [ -n "$(go fmt ./...)" ]; then
+ echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2
+ exit 1
+fi
diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh
new file mode 100644
index 0000000..0e267f5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.golint.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+
+go get golang.org/x/lint/golint
+DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag"
+# Add subdirectories here as we clean up golint on each.
+for subdir in $DIRS; do
+ pushd $subdir
+ if golint |
+ grep -v CannotSetRFMon | # pcap exported error name
+ grep -v DataLost | # tcpassembly/tcpreader exported error name
+ grep .; then
+ exit 1
+ fi
+ popd
+done
+
+pushd layers
+for file in *.go; do
+ if cat .lint_blacklist | grep -q $file; then
+ echo "Skipping lint of $file due to .lint_blacklist"
+ elif golint $file | grep .; then
+ echo "Lint error in file $file"
+ exit 1
+ fi
+done
+popd
diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh
new file mode 100644
index 0000000..a5c1354
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag"
+set -e
+for subdir in $DIRS; do
+ pushd $subdir
+ go vet
+ popd
+done
diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh
new file mode 100644
index 0000000..648c901
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.install.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ev
+
+go get github.com/google/gopacket
+go get github.com/google/gopacket/layers
+go get github.com/google/gopacket/tcpassembly
+go get github.com/google/gopacket/reassembly
+go get github.com/google/gopacket/pcapgo
diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh
new file mode 100644
index 0000000..a483f4f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.script.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ev
+
+go test github.com/google/gopacket
+go test github.com/google/gopacket/layers
+go test github.com/google/gopacket/tcpassembly
+go test github.com/google/gopacket/reassembly
+go test github.com/google/gopacket/pcapgo
+go test github.com/google/gopacket/pcap
diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml
new file mode 100644
index 0000000..8ebb01d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.yml
@@ -0,0 +1,55 @@
+language: go
+go:
+ - 1.11.x
+ - 1.12.x
+ - master
+
+addons:
+ apt:
+ packages:
+ libpcap-dev
+
+# use modules except for older versions (see below)
+install: true
+
+env:
+ - GO111MODULE=on
+
+script: ./.travis.script.sh
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: master
+
+jobs:
+ include:
+ - go: 1.5.x
+ install: ./.travis.install.sh
+ - go: 1.6.x
+ install: ./.travis.install.sh
+ - go: 1.7.x
+ install: ./.travis.install.sh
+ - go: 1.8.x
+ install: ./.travis.install.sh
+ - go: 1.9.x
+ install: ./.travis.install.sh
+ - go: 1.10.x
+ install: ./.travis.install.sh
+ - os: osx
+ go: 1.x
+ - os: windows
+ go: 1.x
+ # winpcap does not work on travis ci - so install nmap to get libpcap
+ before_install: choco install nmap
+ - stage: style
+ name: "fmt/vet/lint"
+ go: 1.x
+ script:
+ - ./.travis.gofmt.sh
+ - ./.travis.govet.sh
+ - ./.travis.golint.sh
+
+stages:
+ - style
+ - test
diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS
new file mode 100644
index 0000000..8431985
--- /dev/null
+++ b/vendor/github.com/google/gopacket/AUTHORS
@@ -0,0 +1,52 @@
+AUTHORS AND MAINTAINERS:
+
+MAIN DEVELOPERS:
+Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
+
+AUTHORS:
+Nigel Tao <nigeltao@google.com>
+Cole Mickens <cole.mickens@gmail.com>
+Ben Daglish <bdaglish@restorepoint.com>
+Luis Martinez <martinezlc99@gmail.com>
+Remco Verhoef <remco@dutchcoders.io>
+Hiroaki Kawai <Hiroaki.Kawai@gmail.com>
+Lukas Lueg <lukas.lueg@gmail.com>
+Laurent Hausermann <laurent.hausermann@gmail.com>
+Bill Green <bgreen@newrelic.com>
+Christian Mäder <christian.maeder@nine.ch>
+Gernot Vormayr <gvormayr@gmail.com>
+Vitor Garcia Graveto <victor.graveto@gmail.com>
+Elias Chavarria Reyes <elchavar@cisco.com>
+
+CONTRIBUTORS:
+Attila Oláh <attila@attilaolah.eu>
+Vittus Mikiassen <matt.miki.vimik@gmail.com>
+Matthias Radestock <matthias.radestock@gmail.com>
+Matthew Sackman <matthew@wellquite.org>
+Loic Prylli <loicp@google.com>
+Alexandre Fiori <fiorix@gmail.com>
+Adrian Tam <adrian.c.m.tam@gmail.com>
+Satoshi Matsumoto <kaorimatz@gmail.com>
+David Stainton <dstainton415@gmail.com>
+Jesse Ward <jesse@jesseward.com>
+Kane Mathers <kane@kanemathers.name>
+Jose Selvi <jselvi@pentester.es>
+Yerden Zhumabekov <yerden.zhumabekov@gmail.com>
+
+-----------------------------------------------
+FORKED FROM github.com/akrennmair/gopcap
+ALL THE FOLLOWING ARE FOR THAT PROJECT
+
+MAIN DEVELOPERS:
+Andreas Krennmair <ak@synflood.at>
+
+CONTRIBUTORS:
+Andrea Nall <anall@andreanall.com>
+Daniel Arndt <danielarndt@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
+Guillaume Savary <guillaume@savary.name>
+Mark Smith <mark@qq.is>
+Miek Gieben <miek@miek.nl>
+Mike Bell <mike@mikebell.org>
+Trevor Strohman <strohman@google.com>
diff --git a/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/google/gopacket/CONTRIBUTING.md
new file mode 100644
index 0000000..99ab7a2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/CONTRIBUTING.md
@@ -0,0 +1,215 @@
+Contributing To gopacket
+========================
+
+So you've got some code and you'd like it to be part of gopacket... wonderful!
+We're happy to accept contributions, whether they're fixes to old protocols, new
+protocols entirely, or anything else you think would improve the gopacket
+library. This document is designed to help you to do just that.
+
+The first section deals with the plumbing: how to actually get a change
+submitted.
+
+The second section deals with coding style... Go is great in that it
+has a uniform style implemented by 'go fmt', but there's still some decisions
+we've made that go above and beyond, and if you follow them, they won't come up
+in your code review.
+
+The third section deals with some of the implementation decisions we've made,
+which may help you to understand the current code and which we may ask you to
+conform to (or provide compelling reasons for ignoring).
+
+Overall, we hope this document will help you to understand our system and write
+great code which fits in, and help us to turn around on your code review quickly
+so the code can make it into the master branch as quickly as possible.
+
+
+How To Submit Code
+------------------
+
+We use github.com's Pull Request feature to receive code contributions from
+external contributors. See
+https://help.github.com/articles/creating-a-pull-request/ for details on
+how to create a request.
+
+Also, there's a local script `gc` in the base directory of GoPacket that
+runs a local set of checks, which should give you relatively high confidence
+that your pull won't fail github pull checks.
+
+```sh
+go get github.com/google/gopacket
+cd $GOROOT/src/pkg/github.com/google/gopacket
+git checkout -b <mynewfeature> # create a new branch to work from
+... code code code ...
+./gc # Run this to do local commits, it performs a number of checks
+```
+
+To sum up:
+
+* DO
+ + Pull down the latest version.
+ + Make a feature-specific branch.
+ + Code using the style and methods discussed in the rest of this document.
+ + Use the ./gc command to do local commits or check correctness.
+ + Push your new feature branch up to github.com, as a pull request.
+ + Handle comments and requests from reviewers, pushing new commits up to
+ your feature branch as problems are addressed.
+ + Put interesting comments and discussions into commit comments.
+* DON'T
+ + Push to someone else's branch without their permission.
+
+
+Coding Style
+------------
+
+* Go code must be run through `go fmt`, `go vet`, and `golint`
+* Follow http://golang.org/doc/effective_go.html as much as possible.
+ + In particular, http://golang.org/doc/effective_go.html#mixed-caps. Enums
+ should be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
+ TcpSourcePort or TCP_SOURCE_PORT).
+* Bonus points for giving enum types a String() field.
+* Any exported types or functions should have commentary
+ (http://golang.org/doc/effective_go.html#commentary)
+
+
+Coding Methods And Implementation Notes
+---------------------------------------
+
+### Error Handling
+
+Many times, you'll be decoding a protocol and run across something bad, a packet
+corruption or the like. How do you handle this? First off, ALWAYS report the
+error. You can do this either by returning the error from the decode() function
+(most common), or if you're up for it you can implement and add an ErrorLayer
+through the packet builder (the first method is a simple shortcut that does
+exactly this, then stops any future decoding).
+
+Often, you'll already have decoded some part of your protocol by the time you hit
+your error. Use your own discretion to determine whether the stuff you've
+already decoded should be returned to the caller or not:
+
+```go
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ prot := &MyProtocol{}
+ if len(data) < 10 {
+ // This error occurred before we did ANYTHING, so there's nothing in my
+ // protocol that the caller could possibly want. Just return the error.
+ return fmt.Errorf("Length %d less than 10", len(data))
+ }
+ prot.ImportantField1 = data[:5]
+ prot.ImportantField2 = data[5:10]
+ // At this point, we've already got enough information in 'prot' to
+ // warrant returning it to the caller, so we'll add it now.
+ p.AddLayer(prot)
+ if len(data) < 15 {
+ // We encountered an error later in the packet, but the caller already
+ // has the important info we've gleaned so far.
+ return fmt.Errorf("Length %d less than 15", len(data))
+ }
+ prot.ImportantField3 = data[10:15]
+ return nil // We've already added the layer, we can just return success.
+}
+```
+
+In general, our code follows the approach of returning the first error it
+encounters. In general, we don't trust any bytes after the first error we see.
+
+### What Is A Layer?
+
+The definition of a layer is up to the discretion of the coder. It should be
+something important enough that it's actually useful to the caller (IE: every
+TLV value should probably NOT be a layer). However, it can be more granular
+than a single protocol... IPv6 and SCTP both implement many layers to handle the
+various parts of the protocol. Use your best judgement, and prepare to defend
+your decisions during code review. ;)
+
+### Performance
+
+We strive to make gopacket as fast as possible while still providing lots of
+features. In general, this means:
+
+* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
+ others on an as-needed basis (tons of MPLS on your network? Time to optimize
+ MPLS!)
+* Use fast operations. See the toplevel benchmark_test for benchmarks of some
+ of Go's underlying features and types.
+* Test your performance changes! You should use the ./gc script's --benchmark
+ flag to submit any performance-related changes. Use pcap/gopacket_benchmark
+ to test your change against a PCAP file based on your traffic patterns.
+* Don't be TOO hacky. Sometimes, removing an unused struct from a field causes
+ a huge performance hit, due to the way that Go currently handles its segmented
+ stack... don't be afraid to clean it up anyway. We'll trust the Go compiler
+ to get good enough over time to handle this. Also, this type of
+ compiler-specific optimization is very fragile; someone adding a field to an
+ entirely different struct elsewhere in the codebase could reverse any gains
+ you might achieve by aligning your allocations.
+* Try to minimize memory allocations. If possible, use []byte to reference
+ pieces of the input, instead of using string, which requires copying the bytes
+ into a new memory allocation.
+* Think hard about what should be evaluated lazily vs. not. In general, a
+ layer's struct should almost exactly mirror the layer's frame. Anything
+ that's more interesting should be a function. This may not always be
+ possible, but it's a good rule of thumb.
+* Don't fear micro-optimizations. With the above in mind, we welcome
+ micro-optimizations that we think will have positive/neutral impacts on the
+ majority of workloads. A prime example of this is pre-allocating certain
+ structs within a larger one:
+
+```go
+type MyProtocol struct {
+ // Most packets have 1-4 of VeryCommon, so we preallocate it here.
+ initialAllocation [4]uint32
+ VeryCommon []uint32
+}
+
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ prot := &MyProtocol{}
+ prot.VeryCommon = prot.initialAllocation[:0]
+ for len(data) > 4 {
+ field := binary.BigEndian.Uint32(data[:4])
+ data = data[4:]
+ // Since we're using the underlying initialAllocation, we won't need to
+ // allocate new memory for the following append unless we have more than 16
+ // bytes of data, which should be the uncommon case.
+ prot.VeryCommon = append(prot.VeryCommon, field)
+ }
+ p.AddLayer(prot)
+ if len(data) > 0 {
+ return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
+ }
+ return nil
+}
+```
+
+### Slices And Data
+
+If you're pulling a slice from the data you're decoding, don't copy it. Just
+use the slice itself.
+
+```go
+type MyProtocol struct {
+ A, B net.IP
+}
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ p.AddLayer(&MyProtocol{
+ A: data[:4],
+ B: data[4:8],
+ })
+ return nil
+}
+```
+
+The caller has already agreed, by using this library, that they won't modify the
+set of bytes they pass in to the decoder, or the library has already copied the
+set of bytes to a read-only location. See DecodeOptions.NoCopy for more
+information.
+
+### Enums/Types
+
+If a protocol has an integer field (uint8, uint16, etc) with a couple of known
+values that mean something special, make it a type. This allows us to do really
+nice things like adding a String() function to them, so we can more easily
+display those to users. Check out layers/enums.go for one example, as well as
+layers/icmp.go for layer-specific enums.
+
+When naming things, try for descriptiveness over succinctness. For example,
+choose DNSResponseRecord over DNSRR.
diff --git a/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/google/gopacket/LICENSE
new file mode 100644
index 0000000..2100d52
--- /dev/null
+++ b/vendor/github.com/google/gopacket/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md
new file mode 100644
index 0000000..a2f48a9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/README.md
@@ -0,0 +1,12 @@
+# GoPacket
+
+This library provides packet decoding capabilities for Go.
+See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
+
+[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
+[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Originally forked from the gopcap project written by Andreas
+Krennmair <ak@synflood.at> (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/google/gopacket/base.go b/vendor/github.com/google/gopacket/base.go
new file mode 100644
index 0000000..797b55f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/base.go
@@ -0,0 +1,178 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// Layer represents a single decoded packet layer (using either the
+// OSI or TCP/IP definition of a layer). When decoding, a packet's data is
+// broken up into a number of layers. The caller may call LayerType() to
+// figure out which type of layer they've received from the packet. Optionally,
+// they may then use a type assertion to get the actual layer type for deep
+// inspection of the data.
+type Layer interface {
+ // LayerType is the gopacket type for this layer.
+ LayerType() LayerType
+ // LayerContents returns the set of bytes that make up this layer.
+ LayerContents() []byte
+ // LayerPayload returns the set of bytes contained within this layer, not
+ // including the layer itself.
+ LayerPayload() []byte
+}
+
+// Payload is a Layer containing the payload of a packet. The definition of
+// what constitutes the payload of a packet depends on previous layers; for
+// TCP and UDP, we stop decoding above layer 4 and return the remaining
+// bytes as a Payload. Payload is an ApplicationLayer.
+type Payload []byte
+
+// LayerType returns LayerTypePayload
+func (p Payload) LayerType() LayerType { return LayerTypePayload }
+
+// LayerContents returns the bytes making up this layer.
+func (p Payload) LayerContents() []byte { return []byte(p) }
+
+// LayerPayload returns the payload within this layer.
+func (p Payload) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as bytes.
+func (p Payload) Payload() []byte { return []byte(p) }
+
+// String implements fmt.Stringer.
+func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) }
+
+// GoString implements fmt.GoStringer.
+func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) }
+
+// CanDecode implements DecodingLayer.
+func (p Payload) CanDecode() LayerClass { return LayerTypePayload }
+
+// NextLayerType implements DecodingLayer.
+func (p Payload) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+ *p = Payload(data)
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+ bytes, err := b.PrependBytes(len(p))
+ if err != nil {
+ return err
+ }
+ copy(bytes, p)
+ return nil
+}
+
+// decodePayload decodes data by returning it all in a Payload layer.
+func decodePayload(data []byte, p PacketBuilder) error {
+ payload := &Payload{}
+ if err := payload.DecodeFromBytes(data, p); err != nil {
+ return nil
+ }
+ p.AddLayer(payload)
+ p.SetApplicationLayer(payload)
+ return nil
+}
+
+// Fragment is a Layer containing a fragment of a larger frame, used by layers
+// like IPv4 and IPv6 that allow for fragmentation of their payloads.
+type Fragment []byte
+
+// LayerType returns LayerTypeFragment
+func (p *Fragment) LayerType() LayerType { return LayerTypeFragment }
+
+// LayerContents implements Layer.
+func (p *Fragment) LayerContents() []byte { return []byte(*p) }
+
+// LayerPayload implements Layer.
+func (p *Fragment) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as a byte slice.
+func (p *Fragment) Payload() []byte { return []byte(*p) }
+
+// String implements fmt.Stringer.
+func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) }
+
+// CanDecode implements DecodingLayer.
+func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment }
+
+// NextLayerType implements DecodingLayer.
+func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+ *p = Fragment(data)
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+ bytes, err := b.PrependBytes(len(*p))
+ if err != nil {
+ return err
+ }
+ copy(bytes, *p)
+ return nil
+}
+
+// decodeFragment decodes data by returning it all in a Fragment layer.
+func decodeFragment(data []byte, p PacketBuilder) error {
+ payload := &Fragment{}
+ if err := payload.DecodeFromBytes(data, p); err != nil {
+ return nil
+ }
+ p.AddLayer(payload)
+ p.SetApplicationLayer(payload)
+ return nil
+}
+
+// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their
+// corresponding OSI layers, as best as possible.
+
+// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2)
+type LinkLayer interface {
+ Layer
+ LinkFlow() Flow
+}
+
+// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI
+// layer 3)
+type NetworkLayer interface {
+ Layer
+ NetworkFlow() Flow
+}
+
+// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI
+// layer 4)
+type TransportLayer interface {
+ Layer
+ TransportFlow() Flow
+}
+
+// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI
+// layer 7), also known as the packet payload.
+type ApplicationLayer interface {
+ Layer
+ Payload() []byte
+}
+
+// ErrorLayer is a packet layer created when decoding of the packet has failed.
+// Its payload is all the bytes that we were unable to decode, and the returned
+// error details why the decoding failed.
+type ErrorLayer interface {
+ Layer
+ Error() error
+}
diff --git a/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/google/gopacket/decode.go
new file mode 100644
index 0000000..2633f84
--- /dev/null
+++ b/vendor/github.com/google/gopacket/decode.go
@@ -0,0 +1,157 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "errors"
+)
+
+// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata.
+type DecodeFeedback interface {
+ // SetTruncated should be called if during decoding you notice that a packet
+ // is shorter than internal layer variables (HeaderLength, or the like) say it
+ // should be. It sets packet.Metadata().Truncated.
+ SetTruncated()
+}
+
+type nilDecodeFeedback struct{}
+
+func (nilDecodeFeedback) SetTruncated() {}
+
+// NilDecodeFeedback implements DecodeFeedback by doing nothing.
+var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{}
+
+// PacketBuilder is used by layer decoders to store the layers they've decoded,
+// and to defer future decoding via NextDecoder.
+// Typically, the pattern for use is:
+// func (m *myDecoder) Decode(data []byte, p PacketBuilder) error {
+// if myLayer, err := myDecodingLogic(data); err != nil {
+// return err
+// } else {
+// p.AddLayer(myLayer)
+// }
+// // maybe do this, if myLayer is a LinkLayer
+// p.SetLinkLayer(myLayer)
+// return p.NextDecoder(nextDecoder)
+// }
+type PacketBuilder interface {
+ DecodeFeedback
+ // AddLayer should be called by a decoder immediately upon successful
+ // decoding of a layer.
+ AddLayer(l Layer)
+ // The following functions set the various specific layers in the final
+ // packet. Note that if many layers call SetX, the first call is kept and all
+ // other calls are ignored.
+ SetLinkLayer(LinkLayer)
+ SetNetworkLayer(NetworkLayer)
+ SetTransportLayer(TransportLayer)
+ SetApplicationLayer(ApplicationLayer)
+ SetErrorLayer(ErrorLayer)
+ // NextDecoder should be called by a decoder when they're done decoding a
+ // packet layer but not done with decoding the entire packet. The next
+ // decoder will be called to decode the last AddLayer's LayerPayload.
+ // Because of this, NextDecoder must only be called once all other
+ // PacketBuilder calls have been made. Set*Layer and AddLayer calls after
+ // NextDecoder calls will behave incorrectly.
+ NextDecoder(next Decoder) error
+ // DumpPacketData is used solely for decoding. If you come across an error
+ // you need to diagnose while processing a packet, call this and your packet's
+ // data will be dumped to stderr so you can create a test. This should never
+ // be called from a production decoder.
+ DumpPacketData()
+ // DecodeOptions returns the decode options
+ DecodeOptions() *DecodeOptions
+}
+
+// Decoder is an interface for logic to decode a packet layer. Users may
+// implement a Decoder to handle their own strange packet types, or may use one
+// of the many decoders available in the 'layers' subpackage to decode things
+// for them.
+type Decoder interface {
+ // Decode decodes the bytes of a packet, sending decoded values and other
+ // information to PacketBuilder, and returning an error if unsuccessful. See
+ // the PacketBuilder documentation for more details.
+ Decode([]byte, PacketBuilder) error
+}
+
+// DecodeFunc wraps a function to make it a Decoder.
+type DecodeFunc func([]byte, PacketBuilder) error
+
+// Decode implements Decoder by calling itself.
+func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error {
+ // function, call thyself.
+ return d(data, p)
+}
+
+// DecodePayload is a Decoder that returns a Payload layer containing all
+// remaining bytes.
+var DecodePayload Decoder = DecodeFunc(decodePayload)
+
+// DecodeUnknown is a Decoder that returns an Unknown layer containing all
+// remaining bytes, useful if you run up against a layer that you're unable to
+// decode yet. This layer is considered an ErrorLayer.
+var DecodeUnknown Decoder = DecodeFunc(decodeUnknown)
+
+// DecodeFragment is a Decoder that returns a Fragment layer containing all
+// remaining bytes.
+var DecodeFragment Decoder = DecodeFunc(decodeFragment)
+
+// LayerTypeZero is an invalid layer type, but can be used to determine whether
+// layer type has actually been set correctly.
+var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown})
+
+// LayerTypeDecodeFailure is the layer type for the default error layer.
+var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown})
+
+// LayerTypePayload is the layer type for a payload that we don't try to decode
+// but treat as a success, IE: an application-level payload.
+var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload})
+
+// LayerTypeFragment is the layer type for a fragment of a layer transported
+// by an underlying layer that supports fragmentation.
+var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment})
+
+// DecodeFailure is a packet layer created if decoding of the packet data failed
+// for some reason. It implements ErrorLayer. LayerContents will be the entire
+// set of bytes that failed to parse, and Error will return the reason parsing
+// failed.
+type DecodeFailure struct {
+ data []byte
+ err error
+ stack []byte
+}
+
+// Error returns the error encountered during decoding.
+func (d *DecodeFailure) Error() error { return d.err }
+
+// LayerContents implements Layer.
+func (d *DecodeFailure) LayerContents() []byte { return d.data }
+
+// LayerPayload implements Layer.
+func (d *DecodeFailure) LayerPayload() []byte { return nil }
+
+// String implements fmt.Stringer.
+func (d *DecodeFailure) String() string {
+ return "Packet decoding error: " + d.Error().Error()
+}
+
+// Dump implements Dumper.
+func (d *DecodeFailure) Dump() (s string) {
+ if d.stack != nil {
+ s = string(d.stack)
+ }
+ return
+}
+
+// LayerType returns LayerTypeDecodeFailure
+func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure }
+
+// decodeUnknown "decodes" unsupported data types by returning an error.
+// This decoder will thus always return a DecodeFailure layer.
+func decodeUnknown(data []byte, p PacketBuilder) error {
+ return errors.New("Layer type not currently supported")
+}
diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go
new file mode 100644
index 0000000..8e33e56
--- /dev/null
+++ b/vendor/github.com/google/gopacket/doc.go
@@ -0,0 +1,371 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package gopacket provides packet decoding for the Go language.
+
+gopacket contains many sub-packages with additional functionality you may find
+useful, including:
+
+ * layers: You'll probably use this every time. This contains the logic
+ built into gopacket for decoding packet protocols. Note that all example
+ code below assumes that you have imported both gopacket and
+ gopacket/layers.
+ * pcap: C bindings to use libpcap to read packets off the wire.
+ * pfring: C bindings to use PF_RING to read packets off the wire.
+ * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
+ * tcpassembly: TCP stream reassembly
+
+Also, if you're looking to dive right into code, see the examples subdirectory
+for numerous simple binaries built using gopacket libraries.
+
+Minimum go version required is 1.5 except for pcapgo/EthernetHandle, afpacket,
+and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Basic Usage
+
+gopacket takes in packet data as a []byte and decodes it into a packet with
+a non-zero number of "layers". Each layer corresponds to a protocol
+within the bytes. Once a packet has been decoded, the layers of the packet
+can be requested from the packet.
+
+ // Decode a packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ // Get the TCP layer from this packet
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+ fmt.Println("This is a TCP packet!")
+ // Get actual TCP data from this layer
+ tcp, _ := tcpLayer.(*layers.TCP)
+ fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
+ }
+ // Iterate over all layers, printing out each layer type
+ for _, layer := range packet.Layers() {
+ fmt.Println("PACKET LAYER:", layer.LayerType())
+ }
+
+Packets can be decoded from a number of starting points. Many of our base
+types implement Decoder, which allow us to decode packets for which
+we don't have full data.
+
+ // Decode an ethernet packet
+ ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
+ // Decode an IPv6 header and everything it contains
+ ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
+ // Decode a TCP header and its payload
+ tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
+
+
+Reading Packets From A Source
+
+Most of the time, you won't just have a []byte of packet data lying around.
+Instead, you'll want to read packets in from somewhere (file, interface, etc)
+and process them. To do that, you'll want to build a PacketSource.
+
+First, you'll need to construct an object that implements the PacketDataSource
+interface. There are implementations of this interface bundled with gopacket
+in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
+for more information on their usage. Once you have a PacketDataSource, you can
+pass it into NewPacketSource, along with a Decoder of your choice, to create
+a PacketSource.
+
+Once you have a PacketSource, you can read packets from it in multiple ways.
+See the docs for PacketSource for more details. The easiest method is the
+Packets function, which returns a channel, then asynchronously writes new
+packets into that channel, closing the channel if the packetSource hits an
+end-of-file.
+
+ packetSource := ... // construct using pcap or pfring
+ for packet := range packetSource.Packets() {
+ handlePacket(packet) // do something with each packet
+ }
+
+You can change the decoding options of the packetSource by setting fields in
+packetSource.DecodeOptions... see the following sections for more details.
+
+
+Lazy Decoding
+
+gopacket optionally decodes packet data lazily, meaning it
+only decodes a packet layer when it needs to handle a function call.
+
+ // Create a packet, but don't actually decode anything yet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Now, decode the packet up to the first IPv4 layer found but no further.
+ // If no IPv4 layer was found, the whole packet will be decoded looking for
+ // it.
+ ip4 := packet.Layer(layers.LayerTypeIPv4)
+ // Decode all layers and return them. The layers up to the first IPv4 layer
+ // are already decoded, and will not require decoding a second time.
+ layers := packet.Layers()
+
+Lazily-decoded packets are not concurrency-safe. Since layers have not all been
+decoded, each call to Layer() or Layers() has the potential to mutate the packet
+in order to decode the next layer. If a packet is used
+in multiple goroutines concurrently, don't use gopacket.Lazy. Then gopacket
+will decode the packet fully, and all future function calls won't mutate the
+object.
+
+
+NoCopy Decoding
+
+By default, gopacket will copy the slice passed to NewPacket and store the
+copy within the packet, so future mutations to the bytes underlying the slice
+don't affect the packet and its layers. If you can guarantee that the
+underlying slice bytes won't be changed, you can use NoCopy to tell
+gopacket.NewPacket, and it'll use the passed-in slice itself.
+
+ // This channel returns new byte slices, each of which points to a new
+ // memory location that's guaranteed immutable for the duration of the
+ // packet.
+ for data := range myByteSliceChannel {
+ p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+ doSomethingWithPacket(p)
+ }
+
+The fastest method of decoding is to use both Lazy and NoCopy, but note from
+the many caveats above that for some implementations either or both may be
+dangerous.
+
+
+Pointers To Known Layers
+
+During decoding, certain layers are stored in the packet as well-known
+layer types. For example, IPv4 and IPv6 are both considered NetworkLayer
+layers, while TCP and UDP are both TransportLayer layers. We support 4
+layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
+analogous to layers 2, 3, 4, and 7 of the OSI model). To access these,
+you can use the packet.LinkLayer, packet.NetworkLayer,
+packet.TransportLayer, and packet.ApplicationLayer functions. Each of
+these functions returns a corresponding interface
+(gopacket.{Link,Network,Transport,Application}Layer). The first three
+provide methods for getting src/dst addresses for that particular layer,
+while the final layer provides a Payload function to get payload data.
+This is helpful, for example, to get payloads for all packets regardless
+of their underlying data type:
+
+ // Get packets from some source
+ for packet := range someSource {
+ if app := packet.ApplicationLayer(); app != nil {
+ if strings.Contains(string(app.Payload()), "magic string") {
+ fmt.Println("Found magic string in a packet!")
+ }
+ }
+ }
+
+A particularly useful layer is ErrorLayer, which is set whenever there's
+an error parsing part of the packet.
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ if err := packet.ErrorLayer(); err != nil {
+ fmt.Println("Error decoding some part of the packet:", err)
+ }
+
+Note that we don't return an error from NewPacket because we may have decoded
+a number of layers successfully before running into our erroneous layer. You
+may still be able to get your Ethernet and IPv4 layers correctly, even if
+your TCP layer is malformed.
+
+
+Flow And Endpoint
+
+gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
+independent manner the fact that a packet is coming from A and going to B.
+The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
+methods for extracting their flow information, without worrying about the type
+of the underlying Layer.
+
+A Flow is a simple object made up of a set of two Endpoints, one source and one
+destination. It details the sender and receiver of the Layer of the Packet.
+
+An Endpoint is a hashable representation of a source or destination. For
+example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
+IP packet. A Flow can be broken into Endpoints, and Endpoints can be combined
+into Flows:
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ netFlow := packet.NetworkLayer().NetworkFlow()
+ src, dst := netFlow.Endpoints()
+ reverseFlow := gopacket.NewFlow(dst, src)
+
+Both Endpoint and Flow objects can be used as map keys, and the equality
+operator can compare them, so you can easily group together all packets
+based on endpoint criteria:
+
+ flows := map[gopacket.Endpoint]chan gopacket.Packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Send all TCP packets to channels based on their destination port.
+ if tcp := packet.Layer(layers.LayerTypeTCP); tcp != nil {
+ flows[tcp.TransportFlow().Dst()] <- packet
+ }
+ // Look for all packets with the same source and destination network address
+ if net := packet.NetworkLayer(); net != nil {
+ src, dst := net.NetworkFlow().Endpoints()
+ if src == dst {
+      fmt.Printf("Fishy packet has same network source and dst: %s\n", src)
+ }
+ }
+ // Find all packets coming from UDP port 1000 to UDP port 500
+ interestingFlow := gopacket.NewFlow(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+ if t := packet.NetworkLayer(); t != nil && t.TransportFlow() == interestingFlow {
+ fmt.Println("Found that UDP flow I was looking for!")
+ }
+
+For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
+which provide quick, non-cryptographic hashes of their contents. Of particular
+importance is the fact that Flow FastHash() is symmetric: A->B will have the same
+hash as B->A. An example usage could be:
+
+ channels := [8]chan gopacket.Packet
+ for i := 0; i < 8; i++ {
+ channels[i] = make(chan gopacket.Packet)
+ go packetHandler(channels[i])
+ }
+ for packet := range getPackets() {
+ if net := packet.NetworkLayer(); net != nil {
+ channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
+ }
+ }
+
+This allows us to split up a packet stream while still making sure that each
+stream sees all packets for a flow (and its bidirectional opposite).
+
+
+Implementing Your Own Decoder
+
+If your network has some strange encapsulation, you can implement your own
+decoder. In this example, we handle Ethernet packets which are encapsulated
+in a 4-byte header.
+
+ // Create a layer type, should be unique and high, so it doesn't conflict,
+ // giving it a name and a decoder to use.
+ var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
+
+ // Implement my layer
+ type MyLayer struct {
+ StrangeHeader []byte
+ payload []byte
+ }
+ func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
+ func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
+ func (m MyLayer) LayerPayload() []byte { return m.payload }
+
+ // Now implement a decoder... this one strips off the first 4 bytes of the
+ // packet.
+ func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
+ // Create my layer
+ p.AddLayer(&MyLayer{data[:4], data[4:]})
+ // Determine how to handle the rest of the packet
+ return p.NextDecoder(layers.LayerTypeEthernet)
+ }
+
+ // Finally, decode your packets:
+ p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
+
+See the docs for Decoder and PacketBuilder for more details on how coding
+decoders works, or look at RegisterLayerType and RegisterEndpointType to see how
+to add layer/endpoint types to gopacket.
+
+
+Fast Decoding With DecodingLayerParser
+
+TLDR: DecodingLayerParser takes about 10% of the time as NewPacket to decode
+packet data, but only for known packet stacks.
+
+Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
+due to its need to allocate a new packet and every respective layer. It's very
+versatile and can handle all known layer types, but sometimes you really only
+care about a specific set of layers regardless, so that versatility is wasted.
+
+DecodingLayerParser avoids memory allocation altogether by decoding packet
+layers directly into preallocated objects, which you can then reference to get
+the packet's information. A quick example:
+
+ func main() {
+ var eth layers.Ethernet
+ var ip4 layers.IPv4
+ var ip6 layers.IPv6
+ var tcp layers.TCP
+ parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp)
+ decoded := []gopacket.LayerType{}
+ for packetData := range somehowGetPacketData() {
+ if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+ fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+ continue
+ }
+ for _, layerType := range decoded {
+ switch layerType {
+ case layers.LayerTypeIPv6:
+ fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
+ case layers.LayerTypeIPv4:
+ fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
+ }
+ }
+ }
+ }
+
+The important thing to note here is that the parser is modifying the passed in
+layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
+speeding up the decoding process. It's even branching based on layer type...
+it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack. However, it won't
+handle any other type... since no other decoders were passed in, an (eth, ip4,
+udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
+LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
+decode a UDP packet).
+
+Unfortunately, not all layers can be used by DecodingLayerParser... only those
+implementing the DecodingLayer interface are usable. Also, it's possible to
+create DecodingLayers that are not themselves Layers... see
+layers.IPv6ExtensionSkipper for an example of this.
+
+
+Creating Packet Data
+
+As well as offering the ability to decode packet data, gopacket will allow you
+to create packets from scratch, as well. A number of gopacket layers implement
+the SerializableLayer interface; these layers can be serialized to a []byte in
+the following manner:
+
+ ip := &layers.IPv4{
+ SrcIP: net.IP{1, 2, 3, 4},
+ DstIP: net.IP{5, 6, 7, 8},
+ // etc...
+ }
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{} // See SerializeOptions for more details.
+ err := ip.SerializeTo(buf, opts)
+ if err != nil { panic(err) }
+ fmt.Println(buf.Bytes()) // prints out a byte slice containing the serialized IPv4 layer.
+
+SerializeTo PREPENDS the given layer onto the SerializeBuffer, and they treat
+the current buffer's Bytes() slice as the payload of the serializing layer.
+Therefore, you can serialize an entire packet by serializing a set of layers in
+reverse order (Payload, then TCP, then IP, then Ethernet, for example). The
+SerializeBuffer's SerializeLayers function is a helper that does exactly that.
+
+To generate a (empty and useless, because no fields are set)
+Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
+
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{}
+ gopacket.SerializeLayers(buf, opts,
+ &layers.Ethernet{},
+ &layers.IPv4{},
+ &layers.TCP{},
+ gopacket.Payload([]byte{1, 2, 3, 4}))
+ packetData := buf.Bytes()
+
+A Final Note
+
+If you use gopacket, you'll almost definitely want to make sure gopacket/layers
+is imported, since when imported it sets all the LayerType variables and fills
+in a lot of interesting variables/maps (DecodersByLayerName, etc). Therefore,
+it's recommended that even if you don't use any layers functions directly, you still import with:
+
+ import (
+ _ "github.com/google/gopacket/layers"
+ )
+*/
+package gopacket
diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go
new file mode 100644
index 0000000..a00c883
--- /dev/null
+++ b/vendor/github.com/google/gopacket/flows.go
@@ -0,0 +1,236 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
+//
+// Endpoints/Flows have a problem: They need to be hashable. Therefore, they
+// can't use a byte slice. The two obvious choices are to use a string or a
+// byte array. Strings work great, but string creation requires memory
+// allocation, which can be slow. Arrays work great, but have a fixed size. We
+// originally used the former, now we've switched to the latter. Use of a fixed
+// byte-array doubles the speed of constructing a flow (due to not needing to
+// allocate). This is a huge increase... too much for us to pass up.
+//
+// The end result of this, though, is that an endpoint/flow can't be created
+// using more than MaxEndpointSize bytes per address.
+const MaxEndpointSize = 16
+
+// Endpoint is the set of bytes used to address packets at various layers.
+// See LinkLayer, NetworkLayer, and TransportLayer specifications.
+// Endpoints are usable as map keys.
+type Endpoint struct {
+ typ EndpointType
+ len int
+ raw [MaxEndpointSize]byte
+}
+
+// EndpointType returns the endpoint type associated with this endpoint.
+func (a Endpoint) EndpointType() EndpointType { return a.typ }
+
+// Raw returns the raw bytes of this endpoint. These aren't human-readable
+// most of the time, but they are faster than calling String.
+func (a Endpoint) Raw() []byte { return a.raw[:a.len] }
+
+// LessThan provides a stable ordering for all endpoints. It sorts first based
+// on the EndpointType of an endpoint, then based on the raw bytes of that
+// endpoint.
+//
+// For some endpoints, the actual comparison may not make sense, however this
+// ordering does provide useful information for most Endpoint types.
+// Ordering is based first on endpoint type, then on raw endpoint bytes.
+// Endpoint bytes are sorted lexicographically.
+func (a Endpoint) LessThan(b Endpoint) bool {
+ return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0)
+}
+
+// fnvHash is used by our FastHash functions, and implements the FNV hash
+// created by Glenn Fowler, Landon Curt Noll, and Phong Vo.
+// See http://isthe.com/chongo/tech/comp/fnv/.
+func fnvHash(s []byte) (h uint64) {
+ h = fnvBasis
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= fnvPrime
+ }
+ return
+}
+
+const fnvBasis = 14695981039346656037
+const fnvPrime = 1099511628211
+
+// FastHash provides a quick hashing function for an endpoint, useful if you'd
+// like to split up endpoints by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (a Endpoint) FastHash() (h uint64) {
+ h = fnvHash(a.raw[:a.len])
+ h ^= uint64(a.typ)
+ h *= fnvPrime
+ return
+}
+
+// NewEndpoint creates a new Endpoint object.
+//
+// The size of raw must be less than MaxEndpointSize, otherwise this function
+// will panic.
+func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) {
+ e.len = len(raw)
+ if e.len > MaxEndpointSize {
+ panic("raw byte length greater than MaxEndpointSize")
+ }
+ e.typ = typ
+ copy(e.raw[:], raw)
+ return
+}
+
+// EndpointTypeMetadata is used to register a new endpoint type.
+type EndpointTypeMetadata struct {
+ // Name is the string returned by an EndpointType's String function.
+ Name string
+ // Formatter is called from an Endpoint's String function to format the raw
+ // bytes in an Endpoint into a human-readable string.
+ Formatter func([]byte) string
+}
+
+// EndpointType is the type of a gopacket Endpoint. This type determines how
+// the bytes stored in the endpoint should be interpreted.
+type EndpointType int64
+
+var endpointTypes = map[EndpointType]EndpointTypeMetadata{}
+
+// RegisterEndpointType creates a new EndpointType and registers it globally.
+// It MUST be passed a unique number, or it will panic. Numbers 0-999 are
+// reserved for gopacket's use.
+func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType {
+ t := EndpointType(num)
+ if _, ok := endpointTypes[t]; ok {
+ panic("Endpoint type number already in use")
+ }
+ endpointTypes[t] = meta
+ return t
+}
+
+func (e EndpointType) String() string {
+ if t, ok := endpointTypes[e]; ok {
+ return t.Name
+ }
+ return strconv.Itoa(int(e))
+}
+
+func (a Endpoint) String() string {
+ if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil {
+ return t.Formatter(a.raw[:a.len])
+ }
+ return fmt.Sprintf("%v:%v", a.typ, a.raw)
+}
+
+// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint.
+// Flows are usable as map keys.
+type Flow struct {
+ typ EndpointType
+ slen, dlen int
+ src, dst [MaxEndpointSize]byte
+}
+
+// FlowFromEndpoints creates a new flow by pasting together two endpoints.
+// The endpoints must have the same EndpointType, or this function will return
+// an error.
+func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) {
+ if src.typ != dst.typ {
+ err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ)
+ return
+ }
+ return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil
+}
+
+// FastHash provides a quick hashing function for a flow, useful if you'd
+// like to split up flows by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide
+// with its reverse flow. IE: the flow A->B will have the same hash as the flow
+// B->A.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (f Flow) FastHash() (h uint64) {
+ // This combination must be commutative. We don't use ^, since that would
+ // give the same hash for all A->A flows.
+ h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen])
+ h ^= uint64(f.typ)
+ h *= fnvPrime
+ return
+}
+
+// String returns a human-readable representation of this flow, in the form
+// "Src->Dst"
+func (f Flow) String() string {
+ s, d := f.Endpoints()
+ return fmt.Sprintf("%v->%v", s, d)
+}
+
+// EndpointType returns the EndpointType for this Flow.
+func (f Flow) EndpointType() EndpointType {
+ return f.typ
+}
+
+// Endpoints returns the two Endpoints for this flow.
+func (f Flow) Endpoints() (src, dst Endpoint) {
+ return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst}
+}
+
+// Src returns the source Endpoint for this flow.
+func (f Flow) Src() (src Endpoint) {
+ src, _ = f.Endpoints()
+ return
+}
+
+// Dst returns the destination Endpoint for this flow.
+func (f Flow) Dst() (dst Endpoint) {
+ _, dst = f.Endpoints()
+ return
+}
+
+// Reverse returns a new flow with endpoints reversed.
+func (f Flow) Reverse() Flow {
+ return Flow{f.typ, f.dlen, f.slen, f.dst, f.src}
+}
+
+// NewFlow creates a new flow.
+//
+// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will
+// panic.
+func NewFlow(t EndpointType, src, dst []byte) (f Flow) {
+ f.slen = len(src)
+ f.dlen = len(dst)
+ if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize {
+ panic("flow raw byte length greater than MaxEndpointSize")
+ }
+ f.typ = t
+ copy(f.src[:], src)
+ copy(f.dst[:], dst)
+ return
+}
+
+// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints
+// that are specified incorrectly during creation.
+var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string {
+ return fmt.Sprintf("%v", b)
+}})
+
+// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid.
+var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil)
+
+// InvalidFlow is a singleton Flow of type EndpointInvalid.
+var InvalidFlow = NewFlow(EndpointInvalid, nil, nil)
diff --git a/vendor/github.com/google/gopacket/gc b/vendor/github.com/google/gopacket/gc
new file mode 100644
index 0000000..b1d8d2e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/gc
@@ -0,0 +1,288 @@
+#!/bin/bash
+# Copyright 2012 Google, Inc. All rights reserved.
+
+# This script provides a simple way to run benchmarks against previous code and
+# keep a log of how benchmarks change over time. When used with the --benchmark
+# flag, it runs benchmarks from the current code and from the last commit run
+# with --benchmark, then stores the results in the git commit description. We
+# rerun the old benchmarks along with the new ones, since there's no guarantee
+# that git commits will happen on the same machine, so machine differences could
+# cause wildly inaccurate results.
+#
+# If you're making changes to 'gopacket' which could cause performance changes,
+# you may be requested to use this commit script to make sure your changes don't
+# have large detrimental effects (or to show off how awesome your performance
+# improvements are).
+#
+# If not run with the --benchmark flag, this script is still very useful... it
+# makes sure all the correct go formatting, building, and testing work as
+# expected.
+
+function Usage {
+ cat <<EOF
+USAGE: $0 [--benchmark regexp] [--root] [--gen] <git commit flags...>
+
+--benchmark: Run benchmark comparisons against last benchmark'd commit
+--root:       Run tests that require root privileges
+--gen: Generate code for MACs/ports by pulling down external data
+
+Note, some 'git commit' flags are necessary, if all else fails, pass in -a
+EOF
+ exit 1
+}
+
+BENCH=""
+GEN=""
+ROOT=""
+while [ ! -z "$1" ]; do
+ case "$1" in
+ "--benchmark")
+ BENCH="$2"
+ shift
+ shift
+ ;;
+ "--gen")
+ GEN="yes"
+ shift
+ ;;
+ "--root")
+ ROOT="yes"
+ shift
+ ;;
+ "--help")
+ Usage
+ ;;
+ "-h")
+ Usage
+ ;;
+ "help")
+ Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+function Root {
+ if [ ! -z "$ROOT" ]; then
+ local exec="$1"
+ # Some folks (like me) keep source code in places inaccessible by root (like
+ # NFS), so to make sure things run smoothly we copy them to a /tmp location.
+ local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
+ echo "Running root test executable $exec as $tmpfile"
+ cp "$exec" "$tmpfile"
+ chmod a+x "$tmpfile"
+ shift
+ sudo "$tmpfile" "$@"
+ fi
+}
+
+if [ "$#" -eq "0" ]; then
+ Usage
+fi
+
+cd $(dirname $0)
+
+# Check for copyright notices.
+for filename in $(find ./ -type f -name '*.go'); do
+ if ! head -n 1 "$filename" | grep -q Copyright; then
+ echo "File '$filename' may not have copyright notice"
+ exit 1
+ fi
+done
+
+set -e
+set -x
+
+if [ ! -z "$ROOT" ]; then
+  echo "Running SUDO to get root privileges for root tests"
+ sudo echo "have root"
+fi
+
+if [ ! -z "$GEN" ]; then
+ pushd macs
+ go run gen.go | gofmt > valid_mac_prefixes.go
+ popd
+ pushd layers
+ go run gen.go | gofmt > iana_ports.go
+ go run gen2.go | gofmt > enums_generated.go
+ popd
+fi
+
+# Make sure everything is formatted, compiles, and tests pass.
+go fmt ./...
+go test -i ./... 2>/dev/null >/dev/null || true
+go test
+go build
+pushd examples/bytediff
+go build
+popd
+if [ -f /usr/include/pcap.h ]; then
+ pushd pcap
+ go test ./...
+ go build ./...
+ go build pcap_tester.go
+ Root pcap_tester --mode=basic
+ Root pcap_tester --mode=filtered
+ Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
+ popd
+ pushd examples/afpacket
+ go build
+ popd
+ pushd examples/pcapdump
+ go build
+ popd
+ pushd examples/arpscan
+ go build
+ popd
+ pushd examples/bidirectional
+ go build
+ popd
+ pushd examples/synscan
+ go build
+ popd
+ pushd examples/httpassembly
+ go build
+ popd
+ pushd examples/statsassembly
+ go build
+ popd
+fi
+pushd macs
+go test ./...
+gofmt -w gen.go
+go build gen.go
+popd
+pushd tcpassembly
+go test ./...
+popd
+pushd reassembly
+go test ./...
+popd
+pushd layers
+gofmt -w gen.go
+go build gen.go
+go test ./...
+popd
+pushd pcapgo
+go test ./...
+go build ./...
+popd
+if [ -f /usr/include/linux/if_packet.h ]; then
+ if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
+ pushd afpacket
+ go build ./...
+ go test ./...
+ popd
+ fi
+fi
+if [ -f /usr/include/pfring.h ]; then
+ pushd pfring
+ go test ./...
+ go build ./...
+ popd
+ pushd examples/pfdump
+ go build
+ popd
+fi
+pushd ip4defrag
+go test ./...
+popd
+pushd defrag
+go test ./...
+popd
+
+for travis_script in `ls .travis.*.sh`; do
+ ./$travis_script
+done
+
+# Run our initial commit
+git commit "$@"
+
+if [ -z "$BENCH" ]; then
+ set +x
+ echo "We're not benchmarking and we've committed... we're done!"
+ exit
+fi
+
+### If we get here, we want to run benchmarks from current commit, and compare
+### then to benchmarks from the last --benchmark commit.
+
+# Get our current branch.
+BRANCH="$(git branch | grep '^*' | awk '{print $2}')"
+
+# File we're going to build our commit description in.
+COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"
+
+# Add the word "BENCH" to the start of the git commit.
+echo -n "BENCH " > $COMMIT_FILE
+
+# Get the current description... there must be an easier way.
+git log -n 1 | grep '^ ' | sed 's/^ //' >> $COMMIT_FILE
+
+# Get the commit sha for the last benchmark commit
+PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')
+
+## Run current benchmarks
+
+cat >> $COMMIT_FILE <<EOF
+
+
+----------------------------------------------------------
+BENCHMARK_MARKER_DO_NOT_CHANGE
+----------------------------------------------------------
+
+Go version $(go version)
+
+
+TEST BENCHMARKS "$BENCH"
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+PCAP BENCHMARK
+EOF
+if [ "$BENCH" -eq ".*" ]; then
+ go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset to last benchmark commit, run benchmarks
+
+git checkout $PREV
+
+cat >> $COMMIT_FILE <<EOF
+----------------------------------------------------------
+BENCHMARKING AGAINST COMMIT $PREV
+----------------------------------------------------------
+
+
+OLD TEST BENCHMARKS
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+OLD PCAP BENCHMARK
+EOF
+if [ "$BENCH" -eq ".*" ]; then
+ go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset back to the most recent commit, edit the commit message by appending
+## benchmark results.
+git checkout $BRANCH
+git commit --amend -F $COMMIT_FILE
diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod
new file mode 100644
index 0000000..99e99f4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.mod
@@ -0,0 +1,8 @@
+module github.com/google/gopacket
+
+go 1.12
+
+require (
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+ golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67
+)
diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum
new file mode 100644
index 0000000..2b28942
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.sum
@@ -0,0 +1,7 @@
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/google/gopacket/layerclass.go
new file mode 100644
index 0000000..775cd09
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layerclass.go
@@ -0,0 +1,107 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+// LayerClass is a set of LayerTypes, used for grabbing one of a number of
+// different types from a packet.
+type LayerClass interface {
+ // Contains returns true if the given layer type should be considered part
+ // of this layer class.
+ Contains(LayerType) bool
+ // LayerTypes returns the set of all layer types in this layer class.
+ // Note that this may not be a fast operation on all LayerClass
+ // implementations.
+ LayerTypes() []LayerType
+}
+
+// Contains implements LayerClass.
+func (l LayerType) Contains(a LayerType) bool {
+ return l == a
+}
+
+// LayerTypes implements LayerClass.
+func (l LayerType) LayerTypes() []LayerType {
+ return []LayerType{l}
+}
+
+// LayerClassSlice implements a LayerClass with a slice.
+type LayerClassSlice []bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (s LayerClassSlice) Contains(t LayerType) bool {
+ return int(t) < len(s) && s[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassSlice.
+// Because of LayerClassSlice's implementation, this could be quite slow.
+func (s LayerClassSlice) LayerTypes() (all []LayerType) {
+ for i := 0; i < len(s); i++ {
+ if s[i] {
+ all = append(all, LayerType(i))
+ }
+ }
+ return
+}
+
+// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of
+// size max(types) and setting slice[t] to true for each type t. Note, if
+// you implement your own LayerType and give it a high value, this WILL create
+// a very large slice.
+func NewLayerClassSlice(types []LayerType) LayerClassSlice {
+ var max LayerType
+ for _, typ := range types {
+ if typ > max {
+ max = typ
+ }
+ }
+ t := make([]bool, int(max+1))
+ for _, typ := range types {
+ t[typ] = true
+ }
+ return t
+}
+
+// LayerClassMap implements a LayerClass with a map.
+type LayerClassMap map[LayerType]bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (m LayerClassMap) Contains(t LayerType) bool {
+ return m[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassMap.
+func (m LayerClassMap) LayerTypes() (all []LayerType) {
+ for t := range m {
+ all = append(all, t)
+ }
+ return
+}
+
+// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each
+// type in types.
+func NewLayerClassMap(types []LayerType) LayerClassMap {
+ m := LayerClassMap{}
+ for _, typ := range types {
+ m[typ] = true
+ }
+ return m
+}
+
+// NewLayerClass creates a LayerClass, attempting to be smart about which type
+// it creates based on which types are passed in.
+func NewLayerClass(types []LayerType) LayerClass {
+ for _, typ := range types {
+ if typ > maxLayerType {
+ // NewLayerClassSlice could create a very large object, so instead create
+ // a map.
+ return NewLayerClassMap(types)
+ }
+ }
+ return NewLayerClassSlice(types)
+}
diff --git a/vendor/github.com/google/gopacket/layers/.lint_blacklist b/vendor/github.com/google/gopacket/layers/.lint_blacklist
new file mode 100644
index 0000000..fded4f6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/.lint_blacklist
@@ -0,0 +1,39 @@
+dot11.go
+eap.go
+endpoints.go
+enums_generated.go
+enums.go
+ethernet.go
+geneve.go
+icmp4.go
+icmp6.go
+igmp.go
+ip4.go
+ip6.go
+layertypes.go
+linux_sll.go
+llc.go
+lldp.go
+mpls.go
+ndp.go
+ntp.go
+ospf.go
+pflog.go
+pppoe.go
+prism.go
+radiotap.go
+rudp.go
+sctp.go
+sflow.go
+tcp.go
+tcpip.go
+tls.go
+tls_alert.go
+tls_appdata.go
+tls_cipherspec.go
+tls_hanshake.go
+tls_test.go
+udp.go
+udplite.go
+usb.go
+vrrp.go
diff --git a/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/google/gopacket/layers/arp.go
new file mode 100644
index 0000000..49e05ac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/arp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// Potential values for ARP.Operation.
+const (
+ ARPRequest = 1
+ ARPReply = 2
+)
+
+// ARP is a ARP packet header.
+type ARP struct {
+ BaseLayer
+ AddrType LinkType
+ Protocol EthernetType
+ HwAddressSize uint8
+ ProtAddressSize uint8
+ Operation uint16
+ SourceHwAddress []byte
+ SourceProtAddress []byte
+ DstHwAddress []byte
+ DstProtAddress []byte
+}
+
+// LayerType returns LayerTypeARP
+func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2]))
+ arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+ arp.HwAddressSize = data[4]
+ arp.ProtAddressSize = data[5]
+ arp.Operation = binary.BigEndian.Uint16(data[6:8])
+ arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize]
+ arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize]
+ arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize]
+ arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize]
+
+ arpLength := 8 + 2*arp.HwAddressSize + 2*arp.ProtAddressSize
+ arp.Contents = data[:arpLength]
+ arp.Payload = data[arpLength:]
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress)
+ bytes, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ if opts.FixLengths {
+ if len(arp.SourceHwAddress) != len(arp.DstHwAddress) {
+ return errors.New("mismatched hardware address sizes")
+ }
+ arp.HwAddressSize = uint8(len(arp.SourceHwAddress))
+ if len(arp.SourceProtAddress) != len(arp.DstProtAddress) {
+ return errors.New("mismatched prot address sizes")
+ }
+ arp.ProtAddressSize = uint8(len(arp.SourceProtAddress))
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol))
+ bytes[4] = arp.HwAddressSize
+ bytes[5] = arp.ProtAddressSize
+ binary.BigEndian.PutUint16(bytes[6:], arp.Operation)
+ start := 8
+ for _, addr := range [][]byte{
+ arp.SourceHwAddress,
+ arp.SourceProtAddress,
+ arp.DstHwAddress,
+ arp.DstProtAddress,
+ } {
+ copy(bytes[start:], addr)
+ start += len(addr)
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (arp *ARP) CanDecode() gopacket.LayerClass {
+ return LayerTypeARP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (arp *ARP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeARP(data []byte, p gopacket.PacketBuilder) error {
+
+ arp := &ARP{}
+ return decodingLayerDecoder(arp, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/google/gopacket/layers/base.go
new file mode 100644
index 0000000..cd59b46
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/base.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+// BaseLayer is a convenience struct which implements the LayerData and
+// LayerPayload functions of the Layer interface.
+type BaseLayer struct {
+ // Contents is the set of bytes that make up this layer. IE: for an
+ // Ethernet packet, this would be the set of bytes making up the
+ // Ethernet frame.
+ Contents []byte
+ // Payload is the set of bytes contained by (but not part of) this
+ // Layer. Again, to take Ethernet as an example, this would be the
+ // set of bytes encapsulated by the Ethernet protocol.
+ Payload []byte
+}
+
+// LayerContents returns the bytes of the packet layer.
+func (b *BaseLayer) LayerContents() []byte { return b.Contents }
+
+// LayerPayload returns the bytes contained within the packet layer.
+func (b *BaseLayer) LayerPayload() []byte { return b.Payload }
+
+type layerDecodingLayer interface {
+ gopacket.Layer
+ DecodeFromBytes([]byte, gopacket.DecodeFeedback) error
+ NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ next := d.NextLayerType()
+ if next == gopacket.LayerTypeZero {
+ return nil
+ }
+ return p.NextDecoder(next)
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [1024]byte
diff --git a/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/google/gopacket/layers/bfd.go
new file mode 100644
index 0000000..43030fb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/bfd.go
@@ -0,0 +1,481 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// BFD Control Packet Format
+// -------------------------
+// The current version of BFD's RFC (RFC 5880) contains the following
+// diagram for the BFD Control packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | My Discriminator |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Your Discriminator |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Desired Min TX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Required Min RX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Required Min Echo RX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// An optional Authentication Section MAY be present:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Authentication Data... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Simple Password Authentication Section Format
+// ---------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Password... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format
+// ----------------------------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Sequence Number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Key/Digest... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format
+// ------------------------------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Sequence Number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Key/Hash... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// From https://tools.ietf.org/rfc/rfc5880.txt
+const bfdMinimumRecordSizeInBytes int = 24
+
+// BFDVersion represents the version as decoded from the BFD control message
+type BFDVersion uint8
+
+// BFDDiagnostic represents diagnostic infomation about a BFD session
+type BFDDiagnostic uint8
+
+// constants that define BFDDiagnostic flags
+const (
+ BFDDiagnosticNone BFDDiagnostic = 0 // No Diagnostic
+ BFDDiagnosticTimeExpired BFDDiagnostic = 1 // Control Detection Time Expired
+ BFDDiagnosticEchoFailed BFDDiagnostic = 2 // Echo Function Failed
+ BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down
+ BFDDiagnosticForwardPlaneReset BFDDiagnostic = 4 // Forwarding Plane Reset
+ BFDDiagnosticPathDown BFDDiagnostic = 5 // Path Down
+ BFDDiagnosticConcatPathDown BFDDiagnostic = 6 // Concatenated Path Down
+ BFDDiagnosticAdminDown BFDDiagnostic = 7 // Administratively Down
+ BFDDiagnosticRevConcatPathDown BFDDiagnostic = 8 // Reverse Concatenated Path Dow
+)
+
+// String returns a string version of BFDDiagnostic
+func (bd BFDDiagnostic) String() string {
+ switch bd {
+ default:
+ return "Unknown"
+ case BFDDiagnosticNone:
+ return "None"
+ case BFDDiagnosticTimeExpired:
+ return "Control Detection Time Expired"
+ case BFDDiagnosticEchoFailed:
+ return "Echo Function Failed"
+ case BFDDiagnosticNeighborSignalDown:
+ return "Neighbor Signaled Session Down"
+ case BFDDiagnosticForwardPlaneReset:
+ return "Forwarding Plane Reset"
+ case BFDDiagnosticPathDown:
+ return "Path Down"
+ case BFDDiagnosticConcatPathDown:
+ return "Concatenated Path Down"
+ case BFDDiagnosticAdminDown:
+ return "Administratively Down"
+ case BFDDiagnosticRevConcatPathDown:
+ return "Reverse Concatenated Path Down"
+ }
+}
+
+// BFDState represents the state of a BFD session
+type BFDState uint8
+
+// constants that define BFDState
+const (
+ BFDStateAdminDown BFDState = 0
+ BFDStateDown BFDState = 1
+ BFDStateInit BFDState = 2
+ BFDStateUp BFDState = 3
+)
+
+// String returns a string version of BFDState
+func (s BFDState) String() string {
+ switch s {
+ default:
+ return "Unknown"
+ case BFDStateAdminDown:
+ return "Admin Down"
+ case BFDStateDown:
+ return "Down"
+ case BFDStateInit:
+ return "Init"
+ case BFDStateUp:
+ return "Up"
+ }
+}
+
+// BFDDetectMultiplier represents the negotiated transmit interval,
+// multiplied by this value, provides the Detection Time for the
+// receiving system in Asynchronous mode.
+type BFDDetectMultiplier uint8
+
+// BFDDiscriminator is a unique, nonzero discriminator value used
+// to demultiplex multiple BFD sessions between the same pair of systems.
+type BFDDiscriminator uint32
+
+// BFDTimeInterval represents a time interval in microseconds
+type BFDTimeInterval uint32
+
+// BFDAuthType represents the authentication used in the BFD session
+type BFDAuthType uint8
+
+// constants that define the BFDAuthType
+const (
+ BFDAuthTypeNone BFDAuthType = 0 // No Auth
+ BFDAuthTypePassword BFDAuthType = 1 // Simple Password
+ BFDAuthTypeKeyedMD5 BFDAuthType = 2 // Keyed MD5
+ BFDAuthTypeMeticulousKeyedMD5 BFDAuthType = 3 // Meticulous Keyed MD5
+ BFDAuthTypeKeyedSHA1 BFDAuthType = 4 // Keyed SHA1
+ BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1
+)
+
+// String returns a string version of BFDAuthType
+func (at BFDAuthType) String() string {
+ switch at {
+ default:
+ return "Unknown"
+ case BFDAuthTypeNone:
+ return "No Authentication"
+ case BFDAuthTypePassword:
+ return "Simple Password"
+ case BFDAuthTypeKeyedMD5:
+ return "Keyed MD5"
+ case BFDAuthTypeMeticulousKeyedMD5:
+ return "Meticulous Keyed MD5"
+ case BFDAuthTypeKeyedSHA1:
+ return "Keyed SHA1"
+ case BFDAuthTypeMeticulousKeyedSHA1:
+ return "Meticulous Keyed SHA1"
+ }
+}
+
+// BFDAuthKeyID represents the authentication key ID in use for
+// this packet. This allows multiple keys to be active simultaneously.
+type BFDAuthKeyID uint8
+
+// BFDAuthSequenceNumber represents the sequence number for this packet.
+// For Keyed Authentication, this value is incremented occasionally. For
+// Meticulous Keyed Authentication, this value is incremented for each
+// successive packet transmitted for a session. This provides protection
+// against replay attacks.
+type BFDAuthSequenceNumber uint32
+
+// BFDAuthData represents the authentication key or digest
+type BFDAuthData []byte
+
+// BFDAuthHeader represents authentication data used in the BFD session
+type BFDAuthHeader struct {
+ AuthType BFDAuthType
+ KeyID BFDAuthKeyID
+ SequenceNumber BFDAuthSequenceNumber
+ Data BFDAuthData
+}
+
+// Length returns the data length of the BFDAuthHeader based on the
+// authentication type
+func (h *BFDAuthHeader) Length() int {
+ switch h.AuthType {
+ case BFDAuthTypePassword:
+ return 3 + len(h.Data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ return 8 + len(h.Data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ return 8 + len(h.Data)
+ default:
+ return 0
+ }
+}
+
+// BFD represents a BFD control message packet whose payload contains
+// the control information required to for a BFD session.
+//
+// References
+// ----------
+//
+// Wikipedia's BFD entry:
+// https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection
+// This is the best place to get an overview of BFD.
+//
+// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010)
+// https://tools.ietf.org/html/rfc5880
+// This is the original BFD specification.
+//
+// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010)
+// https://tools.ietf.org/html/rfc5881
+// Describes the use of the Bidirectional Forwarding Detection (BFD)
+// protocol over IPv4 and IPv6 for single IP hops.
+type BFD struct {
+ BaseLayer // Stores the packet bytes and payload bytes.
+
+ Version BFDVersion // Version of the BFD protocol.
+ Diagnostic BFDDiagnostic // Diagnostic code for last state change
+ State BFDState // Current state
+ Poll bool // Requesting verification
+ Final bool // Responding to a received BFD Control packet that had the Poll (P) bit set.
+ ControlPlaneIndependent bool // BFD implementation does not share fate with its control plane
+ AuthPresent bool // Authentication Section is present and the session is to be authenticated
+ Demand bool // Demand mode is active
+ Multipoint bool // For future point-to-multipoint extensions. Must always be zero
+ DetectMultiplier BFDDetectMultiplier // Detection time multiplier
+ MyDiscriminator BFDDiscriminator // A unique, nonzero discriminator value
+ YourDiscriminator BFDDiscriminator // discriminator received from the remote system.
+ DesiredMinTxInterval BFDTimeInterval // Minimum interval, in microseconds, the local system would like to use when transmitting BFD Control packets
+ RequiredMinRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting
+ RequiredMinEchoRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting
+ AuthHeader *BFDAuthHeader // Authentication data, variable length.
+}
+
+// Length returns the data length of a BFD Control message which
+// changes based on the presence and type of authentication
+// contained in the message
+func (d *BFD) Length() int {
+ if d.AuthPresent && (d.AuthHeader != nil) {
+ return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length()
+ }
+
+ return bfdMinimumRecordSizeInBytes
+}
+
+// LayerType returns the layer type of the BFD object, which is LayerTypeBFD.
+func (d *BFD) LayerType() gopacket.LayerType {
+ return LayerTypeBFD
+}
+
+// decodeBFD analyses a byte slice and attempts to decode it as a BFD
+// control packet
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the BFD layer.
+func decodeBFD(data []byte, p gopacket.PacketBuilder) error {
+
+ // Attempt to decode the byte slice.
+ d := &BFD{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ // If the decoding worked, add the layer to the packet and set it
+ // as the application layer too, if there isn't already one.
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+
+ return nil
+}
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD
+// control packet.
+//
+// Upon succeeds, it loads the BFD object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // If the data block is too short to be a BFD record, then return an error.
+ if len(data) < bfdMinimumRecordSizeInBytes {
+ df.SetTruncated()
+ return errors.New("BFD packet too short")
+ }
+
+ pLen := uint8(data[3])
+ if len(data) != int(pLen) {
+ return errors.New("BFD packet length does not match")
+ }
+
+ // BFD type embeds type BaseLayer which contains two fields:
+ // Contents is supposed to contain the bytes of the data at this level.
+ // Payload is supposed to contain the payload of this level.
+ // Here we set the baselayer to be the bytes of the BFD record.
+ d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+ // Extract the fields from the block of bytes.
+ // To make sense of this, refer to the packet diagram
+ // above and the section on endian conventions.
+
+ // The first few fields are all packed into the first 32 bits. Unpack them.
+ d.Version = BFDVersion(((data[0] & 0xE0) >> 5))
+ d.Diagnostic = BFDDiagnostic(data[0] & 0x1F)
+ data = data[1:]
+
+ d.State = BFDState((data[0] & 0xC0) >> 6)
+ d.Poll = data[0]&0x20 != 0
+ d.Final = data[0]&0x10 != 0
+ d.ControlPlaneIndependent = data[0]&0x08 != 0
+ d.AuthPresent = data[0]&0x04 != 0
+ d.Demand = data[0]&0x02 != 0
+ d.Multipoint = data[0]&0x01 != 0
+ data = data[1:]
+
+ data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0])
+ data, _ = data[1:], uint8(data[0]) // Consume length
+
+ // The remaining fields can just be copied in big endian order.
+ data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+ data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+ data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+ data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+ data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+
+ if d.AuthPresent && (len(data) > 2) {
+ d.AuthHeader = &BFDAuthHeader{}
+ data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0])
+ data, _ = data[1:], uint8(data[0]) // Consume length
+ data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0])
+
+ switch d.AuthHeader.AuthType {
+ case BFDAuthTypePassword:
+ d.AuthHeader.Data = BFDAuthData(data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ // Skipped reserved byte
+ data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+ d.AuthHeader.Data = BFDAuthData(data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ // Skipped reserved byte
+ data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+ d.AuthHeader.Data = BFDAuthData(data)
+ }
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes)
+ if err != nil {
+ return err
+ }
+
+ // Pack the first few fields into the first 32 bits.
+ data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic))
+ h := uint8(0)
+ h |= (uint8(d.State) << 6)
+ h |= (uint8(bool2uint8(d.Poll)) << 5)
+ h |= (uint8(bool2uint8(d.Final)) << 4)
+ h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3)
+ h |= (uint8(bool2uint8(d.AuthPresent)) << 2)
+ h |= (uint8(bool2uint8(d.Demand)) << 1)
+ h |= uint8(bool2uint8(d.Multipoint))
+ data[1] = byte(h)
+ data[2] = byte(d.DetectMultiplier)
+ data[3] = byte(d.Length())
+
+ // The remaining fields can just be copied in big endian order.
+ binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator))
+ binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator))
+ binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval))
+ binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval))
+ binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval))
+
+ if d.AuthPresent && (d.AuthHeader != nil) {
+ auth, err := b.AppendBytes(int(d.AuthHeader.Length()))
+ if err != nil {
+ return err
+ }
+
+ auth[0] = byte(d.AuthHeader.AuthType)
+ auth[1] = byte(d.AuthHeader.Length())
+ auth[2] = byte(d.AuthHeader.KeyID)
+
+ switch d.AuthHeader.AuthType {
+ case BFDAuthTypePassword:
+ copy(auth[3:], d.AuthHeader.Data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ auth[3] = byte(0)
+ binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+ copy(auth[8:], d.AuthHeader.Data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ auth[3] = byte(0)
+ binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+ copy(auth[8:], d.AuthHeader.Data)
+ }
+ }
+
+ return nil
+}
+
+// CanDecode returns a set of layers that BFD objects can decode.
+// As BFD objects can only decide the BFD layer, we can return just that layer.
+// Apparently a single layer type implements LayerClass.
+func (d *BFD) CanDecode() gopacket.LayerClass {
+ return LayerTypeBFD
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (BFD) layer. As BFD packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *BFD) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns an empty byte slice as BFD packets do not carry a payload
+func (d *BFD) Payload() []byte {
+ return nil
+}
+
+// bool2uint8 converts a bool to uint8
+func bool2uint8(b bool) uint8 {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/google/gopacket/layers/cdp.go
new file mode 100644
index 0000000..d67203e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/cdp.go
@@ -0,0 +1,651 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm
+// https://code.google.com/p/ladvd/
+// http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet.
+type CDPTLVType uint16
+
+// CDPTLVType values.
+const (
+ CDPTLVDevID CDPTLVType = 0x0001
+ CDPTLVAddress CDPTLVType = 0x0002
+ CDPTLVPortID CDPTLVType = 0x0003
+ CDPTLVCapabilities CDPTLVType = 0x0004
+ CDPTLVVersion CDPTLVType = 0x0005
+ CDPTLVPlatform CDPTLVType = 0x0006
+ CDPTLVIPPrefix CDPTLVType = 0x0007
+ CDPTLVHello CDPTLVType = 0x0008
+ CDPTLVVTPDomain CDPTLVType = 0x0009
+ CDPTLVNativeVLAN CDPTLVType = 0x000a
+ CDPTLVFullDuplex CDPTLVType = 0x000b
+ CDPTLVVLANReply CDPTLVType = 0x000e
+ CDPTLVVLANQuery CDPTLVType = 0x000f
+ CDPTLVPower CDPTLVType = 0x0010
+ CDPTLVMTU CDPTLVType = 0x0011
+ CDPTLVExtendedTrust CDPTLVType = 0x0012
+ CDPTLVUntrustedCOS CDPTLVType = 0x0013
+ CDPTLVSysName CDPTLVType = 0x0014
+ CDPTLVSysOID CDPTLVType = 0x0015
+ CDPTLVMgmtAddresses CDPTLVType = 0x0016
+ CDPTLVLocation CDPTLVType = 0x0017
+ CDPTLVExternalPortID CDPTLVType = 0x0018
+ CDPTLVPowerRequested CDPTLVType = 0x0019
+ CDPTLVPowerAvailable CDPTLVType = 0x001a
+ CDPTLVPortUnidirectional CDPTLVType = 0x001b
+ CDPTLVEnergyWise CDPTLVType = 0x001d
+ CDPTLVSparePairPOE CDPTLVType = 0x001f
+)
+
+// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer.
+type CiscoDiscoveryValue struct {
+ Type CDPTLVType
+ Length uint16
+ Value []byte
+}
+
+// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol.
+// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885
+type CiscoDiscovery struct {
+ BaseLayer
+ Version byte
+ TTL byte
+ Checksum uint16
+ Values []CiscoDiscoveryValue
+}
+
+// CDPCapability is the set of capabilities advertised by a CDP device.
+type CDPCapability uint32
+
+// CDPCapability values.
+const (
+ CDPCapMaskRouter CDPCapability = 0x0001
+ CDPCapMaskTBBridge CDPCapability = 0x0002
+ CDPCapMaskSPBridge CDPCapability = 0x0004
+ CDPCapMaskSwitch CDPCapability = 0x0008
+ CDPCapMaskHost CDPCapability = 0x0010
+ CDPCapMaskIGMPFilter CDPCapability = 0x0020
+ CDPCapMaskRepeater CDPCapability = 0x0040
+ CDPCapMaskPhone CDPCapability = 0x0080
+ CDPCapMaskRemote CDPCapability = 0x0100
+)
+
+// CDPCapabilities represents the capabilities of a device
+type CDPCapabilities struct {
+ L3Router bool
+ TBBridge bool
+ SPBridge bool
+ L2Switch bool
+ IsHost bool
+ IGMPFilter bool
+ L1Repeater bool
+ IsPhone bool
+ RemotelyManaged bool
+}
+
+// CDP Power-over-Ethernet values.
+const (
+ CDPPoEFourWire byte = 0x01
+ CDPPoEPDArch byte = 0x02
+ CDPPoEPDRequest byte = 0x04
+ CDPPoEPSE byte = 0x08
+)
+
+// CDPSparePairPoE provides information on PoE.
+type CDPSparePairPoE struct {
+ PSEFourWire bool // Supported / Not supported
+ PDArchShared bool // Shared / Independent
+ PDRequestOn bool // On / Off
+ PSEOn bool // On / Off
+}
+
+// CDPVLANDialogue encapsulates a VLAN Query/Reply
+type CDPVLANDialogue struct {
+ ID uint8
+ VLAN uint16
+}
+
+// CDPPowerDialogue encapsulates a Power Query/Reply
+type CDPPowerDialogue struct {
+ ID uint16
+ MgmtID uint16
+ Values []uint32
+}
+
+// CDPLocation provides location information for a CDP device.
+type CDPLocation struct {
+ Type uint8 // Undocumented
+ Location string
+}
+
+// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields)
+type CDPHello struct {
+ OUI []byte
+ ProtocolID uint16
+ ClusterMaster net.IP
+ Unknown1 net.IP
+ Version byte
+ SubVersion byte
+ Status byte
+ Unknown2 byte
+ ClusterCommander net.HardwareAddr
+ SwitchMAC net.HardwareAddr
+ Unknown3 byte
+ ManagementVLAN uint16
+}
+
+// CDPEnergyWiseSubtype is used within CDP to define TLV values.
+type CDPEnergyWiseSubtype uint32
+
+// CDPEnergyWiseSubtype values.
+const (
+ CDPEnergyWiseRole CDPEnergyWiseSubtype = 0x00000007
+ CDPEnergyWiseDomain CDPEnergyWiseSubtype = 0x00000008
+ CDPEnergyWiseName CDPEnergyWiseSubtype = 0x00000009
+ CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017
+)
+
+// CDPEnergyWise is used by CDP to monitor and control power usage.
+type CDPEnergyWise struct {
+ EncryptedData []byte
+ Unknown1 uint32
+ SequenceNumber uint32
+ ModelNumber string
+ Unknown2 uint16
+ HardwareID string
+ SerialNum string
+ Unknown3 []byte
+ Role string
+ Domain string
+ Name string
+ ReplyUnknown1 []byte
+ ReplyPort []byte
+ ReplyAddress []byte
+ ReplyUnknown2 []byte
+ ReplyUnknown3 []byte
+}
+
+// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues
+type CiscoDiscoveryInfo struct {
+ BaseLayer
+ CDPHello
+ DeviceID string
+ Addresses []net.IP
+ PortID string
+ Capabilities CDPCapabilities
+ Version string
+ Platform string
+ IPPrefixes []net.IPNet
+ VTPDomain string
+ NativeVLAN uint16
+ FullDuplex bool
+ VLANReply CDPVLANDialogue
+ VLANQuery CDPVLANDialogue
+ PowerConsumption uint16
+ MTU uint32
+ ExtendedTrust uint8
+ UntrustedCOS uint8
+ SysName string
+ SysOID string
+ MgmtAddresses []net.IP
+ Location CDPLocation
+ PowerRequest CDPPowerDialogue
+ PowerAvailable CDPPowerDialogue
+ SparePairPoe CDPSparePairPoE
+ EnergyWise CDPEnergyWise
+ Unknown []CiscoDiscoveryValue
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscovery.
+func (c *CiscoDiscovery) LayerType() gopacket.LayerType {
+ return LayerTypeCiscoDiscovery
+}
+
+func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error {
+ c := &CiscoDiscovery{
+ Version: data[0],
+ TTL: data[1],
+ Checksum: binary.BigEndian.Uint16(data[2:4]),
+ }
+ if c.Version != 1 && c.Version != 2 {
+ return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version)
+ }
+ var err error
+ c.Values, err = decodeCiscoDiscoveryTLVs(data[4:])
+ if err != nil {
+ return err
+ }
+ c.Contents = data[0:4]
+ c.Payload = data[4:]
+ p.AddLayer(c)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo))
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo.
+func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType {
+ return LayerTypeCiscoDiscoveryInfo
+}
+
+func decodeCiscoDiscoveryTLVs(data []byte) (values []CiscoDiscoveryValue, err error) {
+ for len(data) > 0 {
+ val := CiscoDiscoveryValue{
+ Type: CDPTLVType(binary.BigEndian.Uint16(data[:2])),
+ Length: binary.BigEndian.Uint16(data[2:4]),
+ }
+ if val.Length < 4 {
+ err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length)
+ break
+ }
+ val.Value = data[4:val.Length]
+ values = append(values, val)
+ data = data[val.Length:]
+ }
+ return
+}
+
// decodeCiscoDiscoveryInfo re-parses the CDP TLV stream and interprets each
// known TLV type into the typed fields of a CiscoDiscoveryInfo layer.
// Unrecognized TLVs are preserved verbatim in info.Unknown. Per-TLV minimum
// lengths are enforced via checkCDPTLVLen before any fixed-offset reads.
func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error {
	var err error
	info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}}
	p.AddLayer(info)
	values, err := decodeCiscoDiscoveryTLVs(data)
	if err != nil { // Unlikely, as parent decode will fail, but better safe...
		return err
	}
	for _, val := range values {
		switch val.Type {
		case CDPTLVDevID:
			info.DeviceID = string(val.Value)
		case CDPTLVAddress:
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			info.Addresses, err = decodeAddresses(val.Value)
			if err != nil {
				return err
			}
		case CDPTLVPortID:
			info.PortID = string(val.Value)
		case CDPTLVCapabilities:
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			// NOTE: this val deliberately shadows the loop variable — from here
			// on it is the 32-bit capability bitmask, not the TLV struct.
			val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4]))
			info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0)
			info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0)
			info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0)
			info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0)
			info.Capabilities.IsHost = (val&CDPCapMaskHost > 0)
			info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0)
			info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0)
			info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0)
			info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0)
		case CDPTLVVersion:
			info.Version = string(val.Value)
		case CDPTLVPlatform:
			info.Platform = string(val.Value)
		case CDPTLVIPPrefix:
			// Each prefix is 5 bytes: 4 address octets + 1 mask-length octet.
			v := val.Value
			l := len(v)
			if l%5 == 0 && l >= 5 {
				for len(v) > 0 {
					_, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4]))
					info.IPPrefixes = append(info.IPPrefixes, *ipnet)
					v = v[5:]
				}
			} else {
				return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value))
			}
		case CDPTLVHello:
			// Fixed 32-byte layout; the field offsets below are the undocumented
			// Cisco cluster-hello format (see the CDPHello struct).
			if err = checkCDPTLVLen(val, 32); err != nil {
				return err
			}
			v := val.Value
			info.CDPHello.OUI = v[0:3]
			info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5])
			info.CDPHello.ClusterMaster = v[5:9]
			info.CDPHello.Unknown1 = v[9:13]
			info.CDPHello.Version = v[13]
			info.CDPHello.SubVersion = v[14]
			info.CDPHello.Status = v[15]
			info.CDPHello.Unknown2 = v[16]
			info.CDPHello.ClusterCommander = v[17:23]
			info.CDPHello.SwitchMAC = v[23:29]
			info.CDPHello.Unknown3 = v[29]
			info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32])
		case CDPTLVVTPDomain:
			info.VTPDomain = string(val.Value)
		case CDPTLVNativeVLAN:
			if err = checkCDPTLVLen(val, 2); err != nil {
				return err
			}
			info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2])
		case CDPTLVFullDuplex:
			if err = checkCDPTLVLen(val, 1); err != nil {
				return err
			}
			info.FullDuplex = (val.Value[0] == 1)
		case CDPTLVVLANReply:
			if err = checkCDPTLVLen(val, 3); err != nil {
				return err
			}
			info.VLANReply.ID = uint8(val.Value[0])
			info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
		case CDPTLVVLANQuery:
			if err = checkCDPTLVLen(val, 3); err != nil {
				return err
			}
			info.VLANQuery.ID = uint8(val.Value[0])
			info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
		case CDPTLVPower:
			if err = checkCDPTLVLen(val, 2); err != nil {
				return err
			}
			info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2])
		case CDPTLVMTU:
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			info.MTU = binary.BigEndian.Uint32(val.Value[0:4])
		case CDPTLVExtendedTrust:
			if err = checkCDPTLVLen(val, 1); err != nil {
				return err
			}
			info.ExtendedTrust = uint8(val.Value[0])
		case CDPTLVUntrustedCOS:
			if err = checkCDPTLVLen(val, 1); err != nil {
				return err
			}
			info.UntrustedCOS = uint8(val.Value[0])
		case CDPTLVSysName:
			info.SysName = string(val.Value)
		case CDPTLVSysOID:
			info.SysOID = string(val.Value)
		case CDPTLVMgmtAddresses:
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			info.MgmtAddresses, err = decodeAddresses(val.Value)
			if err != nil {
				return err
			}
		case CDPTLVLocation:
			if err = checkCDPTLVLen(val, 2); err != nil {
				return err
			}
			info.Location.Type = uint8(val.Value[0])
			info.Location.Location = string(val.Value[1:])

			// case CDPTLVLExternalPortID:
			// Undocumented
		case CDPTLVPowerRequested:
			// 2-byte request ID, 2-byte management ID, then 4-byte power values.
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2])
			info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
			for n := 4; n < len(val.Value); n += 4 {
				info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
			}
		case CDPTLVPowerAvailable:
			// Same layout as CDPTLVPowerRequested.
			if err = checkCDPTLVLen(val, 4); err != nil {
				return err
			}
			info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2])
			info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
			for n := 4; n < len(val.Value); n += 4 {
				info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
			}
			// case CDPTLVPortUnidirectional
			// Undocumented
		case CDPTLVEnergyWise:
			// 72-byte fixed prefix followed by a nested TLV list
			// (4-byte subtype, 4-byte length, then payload).
			if err = checkCDPTLVLen(val, 72); err != nil {
				return err
			}
			info.EnergyWise.EncryptedData = val.Value[0:20]
			info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24])
			info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28])
			info.EnergyWise.ModelNumber = string(val.Value[28:44])
			info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46])
			info.EnergyWise.HardwareID = string(val.Value[46:49])
			info.EnergyWise.SerialNum = string(val.Value[49:60])
			info.EnergyWise.Unknown3 = val.Value[60:68]
			tlvLen := binary.BigEndian.Uint16(val.Value[68:70])
			tlvNum := binary.BigEndian.Uint16(val.Value[70:72])
			data := val.Value[72:]
			if len(data) < int(tlvLen) {
				return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data))
			}
			numSeen := 0
			for len(data) > 8 {
				numSeen++
				if numSeen > int(tlvNum) { // Too many TLV's ?
					return fmt.Errorf("Too many TLV's - wanted %d, saw %d", tlvNum, numSeen)
				}
				tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4]))
				tLen := int(binary.BigEndian.Uint32(data[4:8]))
				if tLen > len(data)-8 {
					return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8)
				}
				data = data[8:]
				switch tType {
				// NOTE(review): the string subtypes below take all of the
				// remaining data rather than data[:tLen]; with multiple
				// nested TLVs this would include following TLVs — confirm
				// against upstream/wire captures before relying on it.
				case CDPEnergyWiseRole:
					info.EnergyWise.Role = string(data[:])
				case CDPEnergyWiseDomain:
					info.EnergyWise.Domain = string(data[:])
				case CDPEnergyWiseName:
					info.EnergyWise.Name = string(data[:])
				case CDPEnergyWiseReplyTo:
					// Field widths here are undocumented upstream guesses.
					if len(data) >= 18 {
						info.EnergyWise.ReplyUnknown1 = data[0:2]
						info.EnergyWise.ReplyPort = data[2:4]
						info.EnergyWise.ReplyAddress = data[4:8]
						info.EnergyWise.ReplyUnknown2 = data[8:10]
						info.EnergyWise.ReplyUnknown3 = data[10:14]
					}
				}
				data = data[tLen:]
			}
		case CDPTLVSparePairPOE:
			if err = checkCDPTLVLen(val, 1); err != nil {
				return err
			}
			// Single flag byte; bits defined by the CDPPoE* constants.
			v := val.Value[0]
			info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0)
			info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0)
			info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0)
			info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0)
		default:
			info.Unknown = append(info.Unknown, val)
		}
	}
	return nil
}
+
+// CDP Protocol Types
+const (
+ CDPProtocolTypeNLPID byte = 1
+ CDPProtocolType802_2 byte = 2
+)
+
+// CDPAddressType is used to define TLV values within CDP addresses.
+type CDPAddressType uint64
+
+// CDP Address types.
+const (
+ CDPAddressTypeCLNP CDPAddressType = 0x81
+ CDPAddressTypeIPV4 CDPAddressType = 0xcc
+ CDPAddressTypeIPV6 CDPAddressType = 0xaaaa030000000800
+ CDPAddressTypeDECNET CDPAddressType = 0xaaaa030000006003
+ CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b
+ CDPAddressTypeIPX CDPAddressType = 0xaaaa030000008137
+ CDPAddressTypeVINES CDPAddressType = 0xaaaa0300000080c4
+ CDPAddressTypeXNS CDPAddressType = 0xaaaa030000000600
+ CDPAddressTypeAPOLLO CDPAddressType = 0xaaaa030000008019
+)
+
+func decodeAddresses(v []byte) (addresses []net.IP, err error) {
+ numaddr := int(binary.BigEndian.Uint32(v[0:4]))
+ if numaddr < 1 {
+ return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr)
+ }
+ v = v[4:]
+ if len(v) < numaddr*8 {
+ return nil, fmt.Errorf("Invalid Address TLV length %d", len(v))
+ }
+ for i := 0; i < numaddr; i++ {
+ prottype := v[0]
+ if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type
+ return nil, fmt.Errorf("Invalid Address Protocol %d", prottype)
+ }
+ protlen := int(v[1])
+ if (prottype == CDPProtocolTypeNLPID && protlen != 1) ||
+ (prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length
+ return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen)
+ }
+ plen := make([]byte, 8)
+ copy(plen[8-protlen:], v[2:2+protlen])
+ protocol := CDPAddressType(binary.BigEndian.Uint64(plen))
+ v = v[2+protlen:]
+ addrlen := binary.BigEndian.Uint16(v[0:2])
+ ab := v[2 : 2+addrlen]
+ if protocol == CDPAddressTypeIPV4 && addrlen == 4 {
+ addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3]))
+ } else if protocol == CDPAddressTypeIPV6 && addrlen == 16 {
+ addresses = append(addresses, net.IP(ab))
+ } else {
+ // only handle IPV4 & IPV6 for now
+ }
+ v = v[2+addrlen:]
+ if len(v) < 8 {
+ break
+ }
+ }
+ return
+}
+
+func (t CDPTLVType) String() (s string) {
+ switch t {
+ case CDPTLVDevID:
+ s = "Device ID"
+ case CDPTLVAddress:
+ s = "Addresses"
+ case CDPTLVPortID:
+ s = "Port ID"
+ case CDPTLVCapabilities:
+ s = "Capabilities"
+ case CDPTLVVersion:
+ s = "Software Version"
+ case CDPTLVPlatform:
+ s = "Platform"
+ case CDPTLVIPPrefix:
+ s = "IP Prefix"
+ case CDPTLVHello:
+ s = "Protocol Hello"
+ case CDPTLVVTPDomain:
+ s = "VTP Management Domain"
+ case CDPTLVNativeVLAN:
+ s = "Native VLAN"
+ case CDPTLVFullDuplex:
+ s = "Full Duplex"
+ case CDPTLVVLANReply:
+ s = "VoIP VLAN Reply"
+ case CDPTLVVLANQuery:
+ s = "VLANQuery"
+ case CDPTLVPower:
+ s = "Power consumption"
+ case CDPTLVMTU:
+ s = "MTU"
+ case CDPTLVExtendedTrust:
+ s = "Extended Trust Bitmap"
+ case CDPTLVUntrustedCOS:
+ s = "Untrusted Port CoS"
+ case CDPTLVSysName:
+ s = "System Name"
+ case CDPTLVSysOID:
+ s = "System OID"
+ case CDPTLVMgmtAddresses:
+ s = "Management Addresses"
+ case CDPTLVLocation:
+ s = "Location"
+ case CDPTLVExternalPortID:
+ s = "External Port ID"
+ case CDPTLVPowerRequested:
+ s = "Power Requested"
+ case CDPTLVPowerAvailable:
+ s = "Power Available"
+ case CDPTLVPortUnidirectional:
+ s = "Port Unidirectional"
+ case CDPTLVEnergyWise:
+ s = "Energy Wise"
+ case CDPTLVSparePairPOE:
+ s = "Spare Pair POE"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (a CDPAddressType) String() (s string) {
+ switch a {
+ case CDPAddressTypeCLNP:
+ s = "Connectionless Network Protocol"
+ case CDPAddressTypeIPV4:
+ s = "IPv4"
+ case CDPAddressTypeIPV6:
+ s = "IPv6"
+ case CDPAddressTypeDECNET:
+ s = "DECnet Phase IV"
+ case CDPAddressTypeAPPLETALK:
+ s = "Apple Talk"
+ case CDPAddressTypeIPX:
+ s = "Novell IPX"
+ case CDPAddressTypeVINES:
+ s = "Banyan VINES"
+ case CDPAddressTypeXNS:
+ s = "Xerox Network Systems"
+ case CDPAddressTypeAPOLLO:
+ s = "Apollo"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t CDPEnergyWiseSubtype) String() (s string) {
+ switch t {
+ case CDPEnergyWiseRole:
+ s = "Role"
+ case CDPEnergyWiseDomain:
+ s = "Domain"
+ case CDPEnergyWiseName:
+ s = "Name"
+ case CDPEnergyWiseReplyTo:
+ s = "ReplyTo"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) {
+ if len(v.Value) < l {
+ err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value))
+ }
+ return
+}
diff --git a/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/google/gopacket/layers/ctp.go
new file mode 100644
index 0000000..8287584
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ctp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each
+// EthernetCTP layer.
+type EthernetCTPFunction uint16
+
+// EthernetCTPFunction values.
+const (
+ EthernetCTPFunctionReply EthernetCTPFunction = 1
+ EthernetCTPFunctionForwardData EthernetCTPFunction = 2
+)
+
+// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html.
+// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more
+// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer.
+type EthernetCTP struct {
+ BaseLayer
+ SkipCount uint16
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTP.
+func (c *EthernetCTP) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTP
+}
+
+// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP. See EthernetCTP's docs for more
+// details.
+type EthernetCTPForwardData struct {
+ BaseLayer
+ Function EthernetCTPFunction
+ ForwardAddress []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPForwardData.
+func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTPForwardData
+}
+
+// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint.
+func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress)
+}
+
+// EthernetCTPReply is the Reply layer inside EthernetCTP. See EthernetCTP's docs for more details.
+type EthernetCTPReply struct {
+ BaseLayer
+ Function EthernetCTPFunction
+ ReceiptNumber uint16
+ Data []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPReply.
+func (c *EthernetCTPReply) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTPReply
+}
+
+// Payload returns the EthernetCTP reply's Data bytes.
+func (c *EthernetCTPReply) Payload() []byte { return c.Data }
+
+func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error {
+ c := &EthernetCTP{
+ SkipCount: binary.LittleEndian.Uint16(data[:2]),
+ BaseLayer: BaseLayer{data[:2], data[2:]},
+ }
+ if c.SkipCount%2 != 0 {
+ return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount)
+ }
+ p.AddLayer(c)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+}
+
+// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP
+// layer type to decode next, then decodes based on that.
+func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error {
+ function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2]))
+ switch function {
+ case EthernetCTPFunctionReply:
+ reply := &EthernetCTPReply{
+ Function: function,
+ ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]),
+ Data: data[4:],
+ BaseLayer: BaseLayer{data, nil},
+ }
+ p.AddLayer(reply)
+ p.SetApplicationLayer(reply)
+ return nil
+ case EthernetCTPFunctionForwardData:
+ forward := &EthernetCTPForwardData{
+ Function: function,
+ ForwardAddress: data[2:8],
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ }
+ p.AddLayer(forward)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+ }
+ return fmt.Errorf("Unknown EthernetCTP function type %v", function)
+}
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/google/gopacket/layers/dhcpv4.go
new file mode 100644
index 0000000..3bbd036
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv4.go
@@ -0,0 +1,585 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// DHCPOp rerprents a bootp operation
+type DHCPOp byte
+
+// bootp operations
+const (
+ DHCPOpRequest DHCPOp = 1
+ DHCPOpReply DHCPOp = 2
+)
+
+// String returns a string version of a DHCPOp.
+func (o DHCPOp) String() string {
+ switch o {
+ case DHCPOpRequest:
+ return "Request"
+ case DHCPOpReply:
+ return "Reply"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPMsgType represents a DHCP operation
+type DHCPMsgType byte
+
+// Constants that represent DHCP operations
+const (
+ DHCPMsgTypeUnspecified DHCPMsgType = iota
+ DHCPMsgTypeDiscover
+ DHCPMsgTypeOffer
+ DHCPMsgTypeRequest
+ DHCPMsgTypeDecline
+ DHCPMsgTypeAck
+ DHCPMsgTypeNak
+ DHCPMsgTypeRelease
+ DHCPMsgTypeInform
+)
+
+// String returns a string version of a DHCPMsgType.
+func (o DHCPMsgType) String() string {
+ switch o {
+ case DHCPMsgTypeUnspecified:
+ return "Unspecified"
+ case DHCPMsgTypeDiscover:
+ return "Discover"
+ case DHCPMsgTypeOffer:
+ return "Offer"
+ case DHCPMsgTypeRequest:
+ return "Request"
+ case DHCPMsgTypeDecline:
+ return "Decline"
+ case DHCPMsgTypeAck:
+ return "Ack"
+ case DHCPMsgTypeNak:
+ return "Nak"
+ case DHCPMsgTypeRelease:
+ return "Release"
+ case DHCPMsgTypeInform:
+ return "Inform"
+ default:
+ return "Unknown"
+ }
+}
+
+//DHCPMagic is the RFC 2131 "magic cooke" for DHCP.
+var DHCPMagic uint32 = 0x63825363
+
+// DHCPv4 contains data for a single DHCP packet.
+type DHCPv4 struct {
+ BaseLayer
+ Operation DHCPOp
+ HardwareType LinkType
+ HardwareLen uint8
+ HardwareOpts uint8
+ Xid uint32
+ Secs uint16
+ Flags uint16
+ ClientIP net.IP
+ YourClientIP net.IP
+ NextServerIP net.IP
+ RelayAgentIP net.IP
+ ClientHWAddr net.HardwareAddr
+ ServerName []byte
+ File []byte
+ Options DHCPOptions
+}
+
+// DHCPOptions is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPOptions []DHCPOption
+
+// String returns a string version of the options list.
+func (o DHCPOptions) String() string {
+ buf := &bytes.Buffer{}
+ buf.WriteByte('[')
+ for i, opt := range o {
+ buf.WriteString(opt.String())
+ if i+1 != len(o) {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv4
+func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ d.Options = d.Options[:0]
+ d.Operation = DHCPOp(data[0])
+ d.HardwareType = LinkType(data[1])
+ d.HardwareLen = data[2]
+ d.HardwareOpts = data[3]
+ d.Xid = binary.BigEndian.Uint32(data[4:8])
+ d.Secs = binary.BigEndian.Uint16(data[8:10])
+ d.Flags = binary.BigEndian.Uint16(data[10:12])
+ d.ClientIP = net.IP(data[12:16])
+ d.YourClientIP = net.IP(data[16:20])
+ d.NextServerIP = net.IP(data[20:24])
+ d.RelayAgentIP = net.IP(data[24:28])
+ d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen])
+ d.ServerName = data[44:108]
+ d.File = data[108:236]
+ if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic {
+ return InvalidMagicCookie
+ }
+
+ if len(data) <= 240 {
+ // DHCP Packet could have no option (??)
+ return nil
+ }
+
+ options := data[240:]
+
+ stop := len(options)
+ start := 0
+ for start < stop {
+ o := DHCPOption{}
+ if err := o.decode(options[start:]); err != nil {
+ return err
+ }
+ if o.Type == DHCPOptEnd {
+ break
+ }
+ d.Options = append(d.Options, o)
+ // Check if the option is a single byte pad
+ if o.Type == DHCPOptPad {
+ start++
+ } else {
+ start += int(o.Length) + 2
+ }
+ }
+ return nil
+}
+
+// Len returns the length of a DHCPv4 packet.
+func (d *DHCPv4) Len() uint16 {
+ n := uint16(240)
+ for _, o := range d.Options {
+ if o.Type == DHCPOptPad {
+ n++
+ } else {
+ n += uint16(o.Length) + 2
+ }
+ }
+ n++ // for opt end
+ return n
+}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// Reserve the full packet size up front (fixed header + options + end byte).
	plen := int(d.Len())

	data, err := b.PrependBytes(plen)
	if err != nil {
		return err
	}

	data[0] = byte(d.Operation)
	data[1] = byte(d.HardwareType)
	if opts.FixLengths {
		// Recompute the hardware address length from the actual address.
		d.HardwareLen = uint8(len(d.ClientHWAddr))
	}
	data[2] = d.HardwareLen
	data[3] = d.HardwareOpts
	binary.BigEndian.PutUint32(data[4:8], d.Xid)
	binary.BigEndian.PutUint16(data[8:10], d.Secs)
	binary.BigEndian.PutUint16(data[10:12], d.Flags)
	copy(data[12:16], d.ClientIP.To4())
	copy(data[16:20], d.YourClientIP.To4())
	copy(data[20:24], d.NextServerIP.To4())
	copy(data[24:28], d.RelayAgentIP.To4())
	copy(data[28:44], d.ClientHWAddr)
	copy(data[44:108], d.ServerName)
	copy(data[108:236], d.File)
	// RFC 2131 magic cookie terminates the fixed header.
	binary.BigEndian.PutUint32(data[236:240], DHCPMagic)

	if len(d.Options) > 0 {
		offset := 240
		for _, o := range d.Options {
			if err := o.encode(data[offset:]); err != nil {
				return err
			}
			// A pad option is only a single byte
			if o.Type == DHCPOptPad {
				offset++
			} else {
				offset += 2 + len(o.Data)
			}
		}
		// NOTE(review): the end option is only written when Options is
		// non-empty, yet Len() always reserves one byte for it — with zero
		// options the reserved byte is left unwritten; confirm intended.
		optend := NewDHCPOption(DHCPOptEnd, nil)
		if err := optend.encode(data[offset:]); err != nil {
			return err
		}
	}
	return nil
}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv4) CanDecode() gopacket.LayerClass {
+ return LayerTypeDHCPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv4) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error {
+ dhcp := &DHCPv4{}
+ err := dhcp.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(dhcp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// DHCPOpt represents a DHCP option or parameter from RFC-2132
+type DHCPOpt byte
+
+// Constants for the DHCPOpt options.
+const (
+ DHCPOptPad DHCPOpt = 0
+ DHCPOptSubnetMask DHCPOpt = 1 // 4, net.IP
+ DHCPOptTimeOffset DHCPOpt = 2 // 4, int32 (signed seconds from UTC)
+ DHCPOptRouter DHCPOpt = 3 // n*4, [n]net.IP
+ DHCPOptTimeServer DHCPOpt = 4 // n*4, [n]net.IP
+ DHCPOptNameServer DHCPOpt = 5 // n*4, [n]net.IP
+ DHCPOptDNS DHCPOpt = 6 // n*4, [n]net.IP
+ DHCPOptLogServer DHCPOpt = 7 // n*4, [n]net.IP
+ DHCPOptCookieServer DHCPOpt = 8 // n*4, [n]net.IP
+ DHCPOptLPRServer DHCPOpt = 9 // n*4, [n]net.IP
+ DHCPOptImpressServer DHCPOpt = 10 // n*4, [n]net.IP
+ DHCPOptResLocServer DHCPOpt = 11 // n*4, [n]net.IP
+ DHCPOptHostname DHCPOpt = 12 // n, string
+ DHCPOptBootfileSize DHCPOpt = 13 // 2, uint16
+ DHCPOptMeritDumpFile DHCPOpt = 14 // >1, string
+ DHCPOptDomainName DHCPOpt = 15 // n, string
+ DHCPOptSwapServer DHCPOpt = 16 // n*4, [n]net.IP
+ DHCPOptRootPath DHCPOpt = 17 // n, string
+ DHCPOptExtensionsPath DHCPOpt = 18 // n, string
+ DHCPOptIPForwarding DHCPOpt = 19 // 1, bool
+ DHCPOptSourceRouting DHCPOpt = 20 // 1, bool
+ DHCPOptPolicyFilter DHCPOpt = 21 // 8*n, [n]{net.IP/net.IP}
+ DHCPOptDatagramMTU DHCPOpt = 22 // 2, uint16
+ DHCPOptDefaultTTL DHCPOpt = 23 // 1, byte
+ DHCPOptPathMTUAgingTimeout DHCPOpt = 24 // 4, uint32
+ DHCPOptPathPlateuTableOption DHCPOpt = 25 // 2*n, []uint16
+ DHCPOptInterfaceMTU DHCPOpt = 26 // 2, uint16
+ DHCPOptAllSubsLocal DHCPOpt = 27 // 1, bool
+ DHCPOptBroadcastAddr DHCPOpt = 28 // 4, net.IP
+ DHCPOptMaskDiscovery DHCPOpt = 29 // 1, bool
+ DHCPOptMaskSupplier DHCPOpt = 30 // 1, bool
+ DHCPOptRouterDiscovery DHCPOpt = 31 // 1, bool
+ DHCPOptSolicitAddr DHCPOpt = 32 // 4, net.IP
+ DHCPOptStaticRoute DHCPOpt = 33 // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask
+ DHCPOptARPTrailers DHCPOpt = 34 // 1, bool
+ DHCPOptARPTimeout DHCPOpt = 35 // 4, uint32
+ DHCPOptEthernetEncap DHCPOpt = 36 // 1, bool
+ DHCPOptTCPTTL DHCPOpt = 37 // 1, byte
+ DHCPOptTCPKeepAliveInt DHCPOpt = 38 // 4, uint32
+ DHCPOptTCPKeepAliveGarbage DHCPOpt = 39 // 1, bool
+ DHCPOptNISDomain DHCPOpt = 40 // n, string
+ DHCPOptNISServers DHCPOpt = 41 // 4*n, [n]net.IP
+ DHCPOptNTPServers DHCPOpt = 42 // 4*n, [n]net.IP
+ DHCPOptVendorOption DHCPOpt = 43 // n, [n]byte // may be encapsulated.
+ DHCPOptNetBIOSTCPNS DHCPOpt = 44 // 4*n, [n]net.IP
+ DHCPOptNetBIOSTCPDDS DHCPOpt = 45 // 4*n, [n]net.IP
+ DHCPOptNETBIOSTCPNodeType DHCPOpt = 46 // 1, magic byte
+ DHCPOptNetBIOSTCPScope DHCPOpt = 47 // n, string
+ DHCPOptXFontServer DHCPOpt = 48 // n, string
+ DHCPOptXDisplayManager DHCPOpt = 49 // n, string
+ DHCPOptRequestIP DHCPOpt = 50 // 4, net.IP
+ DHCPOptLeaseTime DHCPOpt = 51 // 4, uint32
+ DHCPOptExtOptions DHCPOpt = 52 // 1, 1/2/3
+ DHCPOptMessageType DHCPOpt = 53 // 1, 1-7
+ DHCPOptServerID DHCPOpt = 54 // 4, net.IP
+ DHCPOptParamsRequest DHCPOpt = 55 // n, []byte
+ DHCPOptMessage DHCPOpt = 56 // n, 3
+ DHCPOptMaxMessageSize DHCPOpt = 57 // 2, uint16
+ DHCPOptT1 DHCPOpt = 58 // 4, uint32
+ DHCPOptT2 DHCPOpt = 59 // 4, uint32
+ DHCPOptClassID DHCPOpt = 60 // n, []byte
+ DHCPOptClientID DHCPOpt = 61 // n >= 2, []byte
+ DHCPOptDomainSearch DHCPOpt = 119 // n, string
+ DHCPOptSIPServers DHCPOpt = 120 // n, url
+ DHCPOptClasslessStaticRoute DHCPOpt = 121 //
+ DHCPOptEnd DHCPOpt = 255
+)
+
+// String returns a string version of a DHCPOpt. Several legacy options
+// are stringified to their historical protocol names (rfc868, ien116,
+// mitLCS) to avoid confusion with the modern protocols (NTP, DNS, Syslog)
+// that superseded them — see the inline comments on those cases.
+func (o DHCPOpt) String() string {
+	switch o {
+	case DHCPOptPad:
+		return "(padding)"
+	case DHCPOptSubnetMask:
+		return "SubnetMask"
+	case DHCPOptTimeOffset:
+		return "TimeOffset"
+	case DHCPOptRouter:
+		return "Router"
+	case DHCPOptTimeServer:
+		return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP
+	case DHCPOptNameServer:
+		return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS
+	case DHCPOptDNS:
+		return "DNS"
+	case DHCPOptLogServer:
+		return "mitLCS" // MIT LCS server protocol yada yada w. Syslog
+	case DHCPOptCookieServer:
+		return "CookieServer"
+	case DHCPOptLPRServer:
+		return "LPRServer"
+	case DHCPOptImpressServer:
+		return "ImpressServer"
+	case DHCPOptResLocServer:
+		return "ResourceLocationServer"
+	case DHCPOptHostname:
+		return "Hostname"
+	case DHCPOptBootfileSize:
+		return "BootfileSize"
+	case DHCPOptMeritDumpFile:
+		return "MeritDumpFile"
+	case DHCPOptDomainName:
+		return "DomainName"
+	case DHCPOptSwapServer:
+		return "SwapServer"
+	case DHCPOptRootPath:
+		return "RootPath"
+	case DHCPOptExtensionsPath:
+		return "ExtensionsPath"
+	case DHCPOptIPForwarding:
+		return "IPForwarding"
+	case DHCPOptSourceRouting:
+		return "SourceRouting"
+	case DHCPOptPolicyFilter:
+		return "PolicyFilter"
+	case DHCPOptDatagramMTU:
+		return "DatagramMTU"
+	case DHCPOptDefaultTTL:
+		return "DefaultTTL"
+	case DHCPOptPathMTUAgingTimeout:
+		return "PathMTUAgingTimeout"
+	case DHCPOptPathPlateuTableOption:
+		return "PathPlateuTableOption"
+	case DHCPOptInterfaceMTU:
+		return "InterfaceMTU"
+	case DHCPOptAllSubsLocal:
+		return "AllSubsLocal"
+	case DHCPOptBroadcastAddr:
+		return "BroadcastAddress"
+	case DHCPOptMaskDiscovery:
+		return "MaskDiscovery"
+	case DHCPOptMaskSupplier:
+		return "MaskSupplier"
+	case DHCPOptRouterDiscovery:
+		return "RouterDiscovery"
+	case DHCPOptSolicitAddr:
+		return "SolicitAddr"
+	case DHCPOptStaticRoute:
+		return "StaticRoute"
+	case DHCPOptARPTrailers:
+		return "ARPTrailers"
+	case DHCPOptARPTimeout:
+		return "ARPTimeout"
+	case DHCPOptEthernetEncap:
+		return "EthernetEncap"
+	case DHCPOptTCPTTL:
+		return "TCPTTL"
+	case DHCPOptTCPKeepAliveInt:
+		return "TCPKeepAliveInt"
+	case DHCPOptTCPKeepAliveGarbage:
+		return "TCPKeepAliveGarbage"
+	case DHCPOptNISDomain:
+		return "NISDomain"
+	case DHCPOptNISServers:
+		return "NISServers"
+	case DHCPOptNTPServers:
+		return "NTPServers"
+	case DHCPOptVendorOption:
+		return "VendorOption"
+	case DHCPOptNetBIOSTCPNS:
+		return "NetBIOSOverTCPNS"
+	case DHCPOptNetBIOSTCPDDS:
+		return "NetBiosOverTCPDDS"
+	case DHCPOptNETBIOSTCPNodeType:
+		return "NetBIOSOverTCPNodeType"
+	case DHCPOptNetBIOSTCPScope:
+		return "NetBIOSOverTCPScope"
+	case DHCPOptXFontServer:
+		return "XFontServer"
+	case DHCPOptXDisplayManager:
+		return "XDisplayManager"
+	case DHCPOptEnd:
+		return "(end)"
+	case DHCPOptSIPServers:
+		return "SipServers"
+	case DHCPOptRequestIP:
+		return "RequestIP"
+	case DHCPOptLeaseTime:
+		return "LeaseTime"
+	case DHCPOptExtOptions:
+		return "ExtOpts"
+	case DHCPOptMessageType:
+		return "MessageType"
+	case DHCPOptServerID:
+		return "ServerID"
+	case DHCPOptParamsRequest:
+		return "ParamsRequest"
+	case DHCPOptMessage:
+		return "Message"
+	case DHCPOptMaxMessageSize:
+		return "MaxDHCPSize"
+	case DHCPOptT1:
+		return "Timer1"
+	case DHCPOptT2:
+		return "Timer2"
+	case DHCPOptClassID:
+		return "ClassID"
+	case DHCPOptClientID:
+		return "ClientID"
+	case DHCPOptDomainSearch:
+		return "DomainSearch"
+	case DHCPOptClasslessStaticRoute:
+		return "ClasslessStaticRoute"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPOption represents a single DHCP option as a (type, length, value)
+// triple.
+type DHCPOption struct {
+	Type   DHCPOpt
+	Length uint8 // number of bytes in Data; unused for Pad/End options
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option, rendering the payload
+// according to the option type: text options as strings, address options
+// as IPs, timer options as decimal uint32s, the parameter-request list as
+// option names, and anything else as raw bytes.
+func (o DHCPOption) String() string {
+	switch o.Type {
+
+	case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath,
+		DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer,
+		DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string
+		return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data))
+
+	case DHCPOptMessageType:
+		if len(o.Data) != 1 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0]))
+
+	case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr,
+		DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP
+		if len(o.Data) < 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data))
+
+	case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout,
+		DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32
+		if len(o.Data) != 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		// Big-endian decode of the 4-byte value.
+		return fmt.Sprintf("Option(%s:%d)", o.Type,
+			uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3]))
+
+	case DHCPOptParamsRequest:
+		// Each payload byte is itself a DHCP option code; print their names.
+		buf := &bytes.Buffer{}
+		buf.WriteString(fmt.Sprintf("Option(%s:", o.Type))
+		for i, v := range o.Data {
+			buf.WriteString(DHCPOpt(v).String())
+			if i+1 != len(o.Data) {
+				buf.WriteByte(',')
+			}
+		}
+		buf.WriteString(")")
+		return buf.String()
+
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data)
+	}
+}
+
+// NewDHCPOption constructs a new DHCPOption with a given type and data.
+// A nil data slice yields an option with zero Length and no payload.
+func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption {
+	opt := DHCPOption{Type: t}
+	if data == nil {
+		return opt
+	}
+	opt.Data = data
+	opt.Length = uint8(len(data))
+	return opt
+}
+
+// encode serializes the option into b: Pad and End are a single type byte;
+// everything else is type, length, then the payload.
+// NOTE(review): no bounds check is performed on b — assumes the caller
+// sized the buffer from the option's Length; confirm at call sites.
+func (o *DHCPOption) encode(b []byte) error {
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		// Single-byte options carry no length or payload.
+		b[0] = byte(o.Type)
+	default:
+		b[0] = byte(o.Type)
+		b[1] = o.Length
+		copy(b[2:], o.Data)
+	}
+	return nil
+}
+
+// decode parses one option from the front of data. Pad and End consume a
+// single byte with no payload; all other options are TLV-encoded. Returns
+// DecOptionNotEnoughData when data is shorter than the option header and
+// DecOptionMalformed when the declared length exceeds the remaining bytes.
+func (o *DHCPOption) decode(data []byte) error {
+	if len(data) < 1 {
+		// Pad/End have a length of 1
+		return DecOptionNotEnoughData
+	}
+	o.Type = DHCPOpt(data[0])
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		o.Data = nil
+	default:
+		if len(data) < 2 {
+			return DecOptionNotEnoughData
+		}
+		o.Length = data[1]
+		if int(o.Length) > len(data[2:]) {
+			return DecOptionMalformed
+		}
+		// Aliases the input slice; no copy is made.
+		o.Data = data[2 : 2+int(o.Length)]
+	}
+	return nil
+}
+
+// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts.
+type DHCPv4Error string
+
+// Error implements the error interface.
+func (d DHCPv4Error) Error() string {
+	return string(d)
+}
+
+const (
+	// DecOptionNotEnoughData is returned when there is not enough data during an option's decode process.
+	DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode")
+	// DecOptionMalformed is returned when an option's declared length exceeds the available data.
+	DecOptionMalformed = DHCPv4Error("Option is malformed")
+	// InvalidMagicCookie is returned when the magic cookie is missing from the BOOTP header.
+	InvalidMagicCookie = DHCPv4Error("Bad DHCP header")
+)
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/google/gopacket/layers/dhcpv6.go
new file mode 100644
index 0000000..052b394
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6.go
@@ -0,0 +1,341 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// DHCPv6MsgType represents a DHCPv6 operation
+type DHCPv6MsgType byte
+
+// Constants that represent DHCP operations
+const (
+	DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota
+	DHCPv6MsgTypeSolicit
+	DHCPv6MsgTypeAdverstise // NOTE: misspelling of "Advertise" kept for API compatibility
+	DHCPv6MsgTypeRequest
+	DHCPv6MsgTypeConfirm
+	DHCPv6MsgTypeRenew
+	DHCPv6MsgTypeRebind
+	DHCPv6MsgTypeReply
+	DHCPv6MsgTypeRelease
+	DHCPv6MsgTypeDecline
+	DHCPv6MsgTypeReconfigure
+	DHCPv6MsgTypeInformationRequest
+	DHCPv6MsgTypeRelayForward
+	DHCPv6MsgTypeRelayReply
+)
+
+// String returns a string version of a DHCPv6MsgType.
+func (o DHCPv6MsgType) String() string {
+	// Table lookup keeps the mapping compact; "Adverstise" reproduces the
+	// constant's existing (misspelled) name.
+	names := map[DHCPv6MsgType]string{
+		DHCPv6MsgTypeUnspecified:        "Unspecified",
+		DHCPv6MsgTypeSolicit:            "Solicit",
+		DHCPv6MsgTypeAdverstise:         "Adverstise",
+		DHCPv6MsgTypeRequest:            "Request",
+		DHCPv6MsgTypeConfirm:            "Confirm",
+		DHCPv6MsgTypeRenew:              "Renew",
+		DHCPv6MsgTypeRebind:             "Rebind",
+		DHCPv6MsgTypeReply:              "Reply",
+		DHCPv6MsgTypeRelease:            "Release",
+		DHCPv6MsgTypeDecline:            "Decline",
+		DHCPv6MsgTypeReconfigure:        "Reconfigure",
+		DHCPv6MsgTypeInformationRequest: "InformationRequest",
+		DHCPv6MsgTypeRelayForward:       "RelayForward",
+		DHCPv6MsgTypeRelayReply:         "RelayReply",
+	}
+	if s, ok := names[o]; ok {
+		return s
+	}
+	return "Unknown"
+}
+
+// DHCPv6 contains data for a single DHCP packet.
+type DHCPv6 struct {
+	BaseLayer
+	MsgType       DHCPv6MsgType
+	HopCount      uint8  // relay messages only
+	LinkAddr      net.IP // relay messages only
+	PeerAddr      net.IP // relay messages only
+	TransactionID []byte // non-relay messages only; 3 bytes
+	Options       DHCPv6Options
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv6
+func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+//
+// Relay messages (RelayForward/RelayReply) carry a hop count plus 16-byte
+// link and peer addresses; all other message types carry a 3-byte
+// transaction ID. Options follow the fixed header in both cases. Truncated
+// input is reported as an error instead of panicking on a short slice.
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// Smallest valid message: 1 byte of type + 3 bytes of transaction ID.
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("DHCPv6 packet too short")
+	}
+	d.BaseLayer = BaseLayer{Contents: data}
+	d.Options = d.Options[:0]
+	d.MsgType = DHCPv6MsgType(data[0])
+
+	offset := 0
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		// Relay header: type, hop count, 16-byte link addr, 16-byte peer addr.
+		if len(data) < 34 {
+			df.SetTruncated()
+			return errors.New("DHCPv6 relay message too short")
+		}
+		d.HopCount = data[1]
+		d.LinkAddr = net.IP(data[2:18])
+		d.PeerAddr = net.IP(data[18:34])
+		offset = 34
+	} else {
+		d.TransactionID = data[1:4]
+		offset = 4
+	}
+
+	stop := len(data)
+	for offset < stop {
+		o := DHCPv6Option{}
+		if err := o.decode(data[offset:]); err != nil {
+			return err
+		}
+		d.Options = append(d.Options, o)
+		offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return nil
+}
+
+// Len returns the length of a DHCPv6 packet: 1 byte of message type, then
+// either 33 bytes of relay header (hop count + link/peer addresses) or
+// 3 bytes of transaction ID, plus 4 header bytes and the payload for each
+// option.
+func (d *DHCPv6) Len() int {
+	n := 1
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		n += 33
+	} else {
+		n += 3
+	}
+
+	for _, o := range d.Options {
+		n += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen := int(d.Len())
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	offset := 0
+	data[0] = byte(d.MsgType)
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		// Relay header: hop count + 16-byte link and peer addresses.
+		data[1] = byte(d.HopCount)
+		// NOTE(review): To16() returns nil for an invalid address, which
+		// leaves these 16 bytes zeroed — confirm callers always set valid
+		// IPv6 addresses on relay messages.
+		copy(data[2:18], d.LinkAddr.To16())
+		copy(data[18:34], d.PeerAddr.To16())
+		offset = 34
+	} else {
+		// Non-relay header: 3-byte transaction ID.
+		copy(data[1:4], d.TransactionID)
+		offset = 4
+	}
+
+	if len(d.Options) > 0 {
+		for _, o := range d.Options {
+			if err := o.encode(data[offset:], opts); err != nil {
+				return err
+			}
+			offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+		}
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeDHCPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+// DHCPv6 carries no nested layer, so anything following is raw payload.
+func (d *DHCPv6) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// decodeDHCPv6 is the gopacket decoder hook: it parses data into a DHCPv6
+// layer, registers it with the packet builder, and hands off the remainder
+// as payload.
+func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error {
+	d := &DHCPv6{}
+	if err := d.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// DHCPv6StatusCode represents a DHCP status code - RFC-3315
+type DHCPv6StatusCode uint16
+
+// Constants for the DHCPv6StatusCode; the zero value means success.
+const (
+	DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota
+	DHCPv6StatusCodeUnspecFail
+	DHCPv6StatusCodeNoAddrsAvail
+	DHCPv6StatusCodeNoBinding
+	DHCPv6StatusCodeNotOnLink
+	DHCPv6StatusCodeUseMulticast
+)
+
+// String returns a string version of a DHCPv6StatusCode.
+func (o DHCPv6StatusCode) String() string {
+	names := map[DHCPv6StatusCode]string{
+		DHCPv6StatusCodeSuccess:      "Success",
+		DHCPv6StatusCodeUnspecFail:   "UnspecifiedFailure",
+		DHCPv6StatusCodeNoAddrsAvail: "NoAddressAvailable",
+		DHCPv6StatusCodeNoBinding:    "NoBinding",
+		DHCPv6StatusCodeNotOnLink:    "NotOnLink",
+		DHCPv6StatusCodeUseMulticast: "UseMulticast",
+	}
+	if s, ok := names[o]; ok {
+		return s
+	}
+	return "Unknown"
+}
+
+// DHCPv6DUIDType represents a DHCP DUID - RFC-3315
+type DHCPv6DUIDType uint16
+
+// Constants for the DHCPv6DUIDType; values start at 1 per the RFC.
+const (
+	DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1 // link-layer address plus time
+	DHCPv6DUIDTypeEN                            // vendor-assigned, based on enterprise number
+	DHCPv6DUIDTypeLL                            // link-layer address
+)
+
+// String returns a string version of a DHCPv6DUIDType.
+func (o DHCPv6DUIDType) String() string {
+	if o == DHCPv6DUIDTypeLLT {
+		return "LLT"
+	}
+	if o == DHCPv6DUIDTypeEN {
+		return "EN"
+	}
+	if o == DHCPv6DUIDTypeLL {
+		return "LL"
+	}
+	return "Unknown"
+}
+
+// DHCPv6DUID means DHCP Unique Identifier as stated in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19)
+type DHCPv6DUID struct {
+	Type DHCPv6DUIDType
+	// HardwareType is populated for LLT and LL DUIDs.
+	HardwareType []byte
+	// EnterpriseNumber is populated for EN DUIDs.
+	EnterpriseNumber []byte
+	// Time is populated for LLT DUIDs.
+	Time []byte
+	// LinkLayerAddress is populated for LLT and LL DUIDs.
+	LinkLayerAddress net.HardwareAddr
+	// Identifier is populated for EN DUIDs.
+	Identifier []byte
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6DUID. It validates
+// that data is long enough for the fixed-width fields of the decoded DUID
+// type before slicing, and reports the actual byte count in the error.
+// (The previous version used string(len(data)), which converts the int to
+// a rune rather than its decimal representation.)
+func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error {
+	if len(data) < 2 {
+		return fmt.Errorf("not enough bytes to decode: %d", len(data))
+	}
+
+	d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2]))
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		// 2-byte hardware type follows the DUID type.
+		if len(data) < 4 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.HardwareType = data[2:4]
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		// 4-byte time field, then the link-layer address.
+		if len(data) < 8 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.Time = data[4:8]
+		d.LinkLayerAddress = net.HardwareAddr(data[8:])
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		// 4-byte enterprise number, then the vendor identifier.
+		if len(data) < 6 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.EnterpriseNumber = data[2:6]
+		d.Identifier = data[6:]
+	} else { // DHCPv6DUIDTypeLL
+		d.LinkLayerAddress = net.HardwareAddr(data[4:])
+	}
+
+	return nil
+}
+
+// Encode encodes the DHCPv6DUID in a freshly allocated slice of bytes.
+// NOTE(review): assumes the variable-width fields hold their expected
+// sizes (HardwareType 2 bytes, Time 4 bytes, EnterpriseNumber 4 bytes);
+// shorter slices leave zero padding rather than returning an error.
+func (d *DHCPv6DUID) Encode() []byte {
+	length := d.Len()
+	data := make([]byte, length)
+	binary.BigEndian.PutUint16(data[0:2], uint16(d.Type))
+
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		copy(data[2:4], d.HardwareType)
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		copy(data[4:8], d.Time)
+		copy(data[8:], d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		copy(data[2:6], d.EnterpriseNumber)
+		copy(data[6:], d.Identifier)
+	} else {
+		copy(data[4:], d.LinkLayerAddress)
+	}
+
+	return data
+}
+
+// Len returns the encoded length in bytes of the DHCPv6DUID, which varies
+// with the DUID type.
+func (d *DHCPv6DUID) Len() int {
+	length := 2 // d.Type
+	if d.Type == DHCPv6DUIDTypeLLT {
+		length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier)
+	} else { // LL
+		length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress)
+	}
+
+	return length
+}
+
+// String returns a human-readable summary of the DUID, listing only the
+// fields relevant to its type.
+func (d *DHCPv6DUID) String() string {
+	duid := "Type: " + d.Type.String() + ", "
+	if d.Type == DHCPv6DUIDTypeLLT {
+		duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", d.HardwareType, d.Time, d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier)
+	} else { // DHCPv6DUIDTypeLL
+		duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress)
+	}
+	return duid
+}
+
+// decodeDHCPv6DUID parses a DUID from raw bytes, returning nil on failure.
+func decodeDHCPv6DUID(data []byte) (*DHCPv6DUID, error) {
+	d := new(DHCPv6DUID)
+	if err := d.DecodeFromBytes(data); err != nil {
+		return nil, err
+	}
+	return d, nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
new file mode 100644
index 0000000..0c05e35
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// DHCPv6Opt represents a DHCP option or parameter from RFC-3315 and the
+// follow-on RFCs referenced in the constant block below.
+type DHCPv6Opt uint16
+
+// Constants for the DHCPv6Opt options.
+const (
+ DHCPv6OptClientID DHCPv6Opt = 1
+ DHCPv6OptServerID DHCPv6Opt = 2
+ DHCPv6OptIANA DHCPv6Opt = 3
+ DHCPv6OptIATA DHCPv6Opt = 4
+ DHCPv6OptIAAddr DHCPv6Opt = 5
+ DHCPv6OptOro DHCPv6Opt = 6
+ DHCPv6OptPreference DHCPv6Opt = 7
+ DHCPv6OptElapsedTime DHCPv6Opt = 8
+ DHCPv6OptRelayMessage DHCPv6Opt = 9
+ DHCPv6OptAuth DHCPv6Opt = 11
+ DHCPv6OptUnicast DHCPv6Opt = 12
+ DHCPv6OptStatusCode DHCPv6Opt = 13
+ DHCPv6OptRapidCommit DHCPv6Opt = 14
+ DHCPv6OptUserClass DHCPv6Opt = 15
+ DHCPv6OptVendorClass DHCPv6Opt = 16
+ DHCPv6OptVendorOpts DHCPv6Opt = 17
+ DHCPv6OptInterfaceID DHCPv6Opt = 18
+ DHCPv6OptReconfigureMessage DHCPv6Opt = 19
+ DHCPv6OptReconfigureAccept DHCPv6Opt = 20
+
+ // RFC 3319 Session Initiation Protocol (SIP)
+ DHCPv6OptSIPServersDomainList DHCPv6Opt = 21
+ DHCPv6OptSIPServersAddressList DHCPv6Opt = 22
+
+ // RFC 3646 DNS Configuration
+ DHCPv6OptDNSServers DHCPv6Opt = 23
+ DHCPv6OptDomainList DHCPv6Opt = 24
+
+ // RFC 3633 Prefix Delegation
+ DHCPv6OptIAPD DHCPv6Opt = 25
+ DHCPv6OptIAPrefix DHCPv6Opt = 26
+
+ // RFC 3898 Network Information Service (NIS)
+ DHCPv6OptNISServers DHCPv6Opt = 27
+ DHCPv6OptNISPServers DHCPv6Opt = 28
+ DHCPv6OptNISDomainName DHCPv6Opt = 29
+ DHCPv6OptNISPDomainName DHCPv6Opt = 30
+
+ // RFC 4075 Simple Network Time Protocol (SNTP)
+ DHCPv6OptSNTPServers DHCPv6Opt = 31
+
+ // RFC 4242 Information Refresh Time Option
+ DHCPv6OptInformationRefreshTime DHCPv6Opt = 32
+
+ // RFC 4280 Broadcast and Multicast Control Servers
+ DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33
+ DHCPv6OptBCMCSServerAddressList DHCPv6Opt = 34
+
+ // RFC 4776 Civic Address ConfigurationOption
+ DHCPv6OptGeoconfCivic DHCPv6Opt = 36
+
+ // RFC 4649 Relay Agent Remote-ID
+ DHCPv6OptRemoteID DHCPv6Opt = 37
+
+ // RFC 4580 Relay Agent Subscriber-ID
+ DHCPv6OptSubscriberID DHCPv6Opt = 38
+
+ // RFC 4704 Client Full Qualified Domain Name (FQDN)
+ DHCPv6OptClientFQDN DHCPv6Opt = 39
+
+ // RFC 5192 Protocol for Carrying Authentication for Network Access (PANA)
+ DHCPv6OptPanaAgent DHCPv6Opt = 40
+
+ // RFC 4833 Timezone Options
+ DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41
+ DHCPv6OptNewTZDBTimezone DHCPv6Opt = 42
+
+ // RFC 4994 Relay Agent Echo Request
+ DHCPv6OptEchoRequestOption DHCPv6Opt = 43
+
+ // RFC 5007 Leasequery
+ DHCPv6OptLQQuery DHCPv6Opt = 44
+ DHCPv6OptCLTTime DHCPv6Opt = 45
+ DHCPv6OptClientData DHCPv6Opt = 46
+ DHCPv6OptLQRelayData DHCPv6Opt = 47
+ DHCPv6OptLQClientLink DHCPv6Opt = 48
+
+ // RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6)
+ DHCPv6OptMIP6HNIDF DHCPv6Opt = 49
+ DHCPv6OptMIP6VDINF DHCPv6Opt = 50
+ DHCPv6OptMIP6IDINF DHCPv6Opt = 69
+ DHCPv6OptMIP6UDINF DHCPv6Opt = 70
+ DHCPv6OptMIP6HNP DHCPv6Opt = 71
+ DHCPv6OptMIP6HAA DHCPv6Opt = 72
+ DHCPv6OptMIP6HAF DHCPv6Opt = 73
+
+ // RFC 5223 Discovering Location-to-Service Translation (LoST) Servers
+ DHCPv6OptV6LOST DHCPv6Opt = 51
+
+ // RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP)
+ DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52
+
+ // RFC 5460 Bulk Leasequery
+ DHCPv6OptRelayID DHCPv6Opt = 53
+
+ // RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery
+ DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54
+ DHCPv6OptIPv6FQDNMoS DHCPv6Opt = 55
+
+ // RFC 5908 NTP Server Option
+ DHCPv6OptNTPServer DHCPv6Opt = 56
+
+ // RFC 5986 Discovering the Local Location Information Server (LIS)
+ DHCPv6OptV6AccessDomain DHCPv6Opt = 57
+
+ // RFC 5986 SIP User Agent
+ DHCPv6OptSIPUACSList DHCPv6Opt = 58
+
+ // RFC 5970 Options for Network Boot
+ DHCPv6OptBootFileURL DHCPv6Opt = 59
+ DHCPv6OptBootFileParam DHCPv6Opt = 60
+ DHCPv6OptClientArchType DHCPv6Opt = 61
+ DHCPv6OptNII DHCPv6Opt = 62
+
+ // RFC 6225 Coordinate-Based Location Configuration Information
+ DHCPv6OptGeolocation DHCPv6Opt = 63
+
+ // RFC 6334 Dual-Stack Lite
+ DHCPv6OptAFTRName DHCPv6Opt = 64
+
+ // RFC 6440 EAP Re-authentication Protocol (ERP)
+ DHCPv6OptERPLocalDomainName DHCPv6Opt = 65
+
+ // RFC 6422 Relay-Supplied DHCP Options
+ DHCPv6OptRSOO DHCPv6Opt = 66
+
+ // RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation
+ DHCPv6OptPDExclude DHCPv6Opt = 67
+
+ // RFC 6607 Virtual Subnet Selection
+ DHCPv6OptVSS DHCPv6Opt = 68
+
+ // RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes
+ DHCPv6OptRDNSSSelection DHCPv6Opt = 74
+
+ // RFC 6784 Kerberos Options for DHCPv6
+ DHCPv6OptKRBPrincipalName DHCPv6Opt = 75
+ DHCPv6OptKRBRealmName DHCPv6Opt = 76
+ DHCPv6OptKRBKDC DHCPv6Opt = 77
+
+ // RFC 6939 Client Link-Layer Address Option
+ DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79
+
+ // RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents
+ DHCPv6OptLinkAddress DHCPv6Opt = 80
+
+ // RFC 7037 RADIUS Option for the DHCPv6 Relay Agent
+ DHCPv6OptRADIUS DHCPv6Opt = 81
+
+ // RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT
+ DHCPv6OptSolMaxRt DHCPv6Opt = 82
+ DHCPv6OptInfMaxRt DHCPv6Opt = 83
+
+ // RFC 7078 Distributing Address Selection Policy
+ DHCPv6OptAddrSel DHCPv6Opt = 84
+ DHCPv6OptAddrSelTable DHCPv6Opt = 85
+
+ // RFC 7291 DHCP Options for the Port Control Protocol (PCP)
+ DHCPv6OptV6PCPServer DHCPv6Opt = 86
+
+ // RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport
+ DHCPv6OptDHCPv4Message DHCPv6Opt = 87
+ DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88
+
+ // RFC 7598 Configuration of Softwire Address and Port-Mapped Clients
+ DHCPv6OptS46Rule DHCPv6Opt = 89
+ DHCPv6OptS46BR DHCPv6Opt = 90
+ DHCPv6OptS46DMR DHCPv6Opt = 91
+ DHCPv6OptS46V4V4Bind DHCPv6Opt = 92
+ DHCPv6OptS46PortParameters DHCPv6Opt = 93
+ DHCPv6OptS46ContMAPE DHCPv6Opt = 94
+ DHCPv6OptS46ContMAPT DHCPv6Opt = 95
+ DHCPv6OptS46ContLW DHCPv6Opt = 96
+
+ // RFC 7600 IPv4 Residual Deployment via IPv6
+ DHCPv6Opt4RD DHCPv6Opt = 97
+ DHCPv6Opt4RDMapRule DHCPv6Opt = 98
+ DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99
+
+ // RFC 7653 Active Leasequery
+ DHCPv6OptLQBaseTime DHCPv6Opt = 100
+ DHCPv6OptLQStartTime DHCPv6Opt = 101
+ DHCPv6OptLQEndTime DHCPv6Opt = 102
+
+ // RFC 7710 Captive-Portal Identification
+ DHCPv6OptCaptivePortal DHCPv6Opt = 103
+
+ // RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter Configuration
+ DHCPv6OptMPLParameters DHCPv6Opt = 104
+
+ // RFC 7839 Access-Network-Identifier (ANI)
+ DHCPv6OptANIATT DHCPv6Opt = 105
+ DHCPv6OptANINetworkName DHCPv6Opt = 106
+ DHCPv6OptANIAPName DHCPv6Opt = 107
+ DHCPv6OptANIAPBSSID DHCPv6Opt = 108
+ DHCPv6OptANIOperatorID DHCPv6Opt = 109
+ DHCPv6OptANIOperatorRealm DHCPv6Opt = 110
+
+ // RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE)
+ DHCPv6OptS46Priority DHCPv6Opt = 111
+
+ // draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD)
+ DHCPv6OptMUDURLV6 DHCPv6Opt = 112
+
+ // RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes
+ DHCPv6OptV6Prefix64 DHCPv6Opt = 113
+
+ // RFC 8156 DHCPv6 Failover Protocol
+ DHCPv6OptFBindingStatus DHCPv6Opt = 114
+ DHCPv6OptFConnectFlags DHCPv6Opt = 115
+ DHCPv6OptFDNSRemovalInfo DHCPv6Opt = 116
+ DHCPv6OptFDNSHostName DHCPv6Opt = 117
+ DHCPv6OptFDNSZoneName DHCPv6Opt = 118
+ DHCPv6OptFDNSFlags DHCPv6Opt = 119
+ DHCPv6OptFExpirationTime DHCPv6Opt = 120
+ DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121
+ DHCPv6OptFMCLT DHCPv6Opt = 122
+ DHCPv6OptFPartnerLifetime DHCPv6Opt = 123
+ DHCPv6OptFPartnerLifetimeSent DHCPv6Opt = 124
+ DHCPv6OptFPartnerDownTime DHCPv6Opt = 125
+ DHCPv6OptFPartnerRawCltTime DHCPv6Opt = 126
+ DHCPv6OptFProtocolVersion DHCPv6Opt = 127
+ DHCPv6OptFKeepaliveTime DHCPv6Opt = 128
+ DHCPv6OptFReconfigureData DHCPv6Opt = 129
+ DHCPv6OptFRelationshipName DHCPv6Opt = 130
+ DHCPv6OptFServerFlags DHCPv6Opt = 131
+ DHCPv6OptFServerState DHCPv6Opt = 132
+ DHCPv6OptFStartTimeOfState DHCPv6Opt = 133
+ DHCPv6OptFStateExpirationTime DHCPv6Opt = 134
+
+ // RFC 8357 Generalized UDP Source Port for DHCP Relay
+ DHCPv6OptRelayPort DHCPv6Opt = 135
+
+ // draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices
+ DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136
+
+ // RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery
+ DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143
+)
+
+// String returns a string version of a DHCPv6Opt.
+//
+// Fix: DHCPv6OptERPLocalDomainName previously returned "AFTRName", a
+// copy-paste duplicate of the preceding DHCPv6OptAFTRName case; it now
+// returns "ERPLocalDomainName".
+func (o DHCPv6Opt) String() string {
+	switch o {
+	case DHCPv6OptClientID:
+		return "ClientID"
+	case DHCPv6OptServerID:
+		return "ServerID"
+	case DHCPv6OptIANA:
+		return "IA_NA"
+	case DHCPv6OptIATA:
+		return "IA_TA"
+	case DHCPv6OptIAAddr:
+		return "IAAddr"
+	case DHCPv6OptOro:
+		return "Oro"
+	case DHCPv6OptPreference:
+		return "Preference"
+	case DHCPv6OptElapsedTime:
+		return "ElapsedTime"
+	case DHCPv6OptRelayMessage:
+		return "RelayMessage"
+	case DHCPv6OptAuth:
+		return "Auth"
+	case DHCPv6OptUnicast:
+		return "Unicast"
+	case DHCPv6OptStatusCode:
+		return "StatusCode"
+	case DHCPv6OptRapidCommit:
+		return "RapidCommit"
+	case DHCPv6OptUserClass:
+		return "UserClass"
+	case DHCPv6OptVendorClass:
+		return "VendorClass"
+	case DHCPv6OptVendorOpts:
+		return "VendorOpts"
+	case DHCPv6OptInterfaceID:
+		return "InterfaceID"
+	case DHCPv6OptReconfigureMessage:
+		return "ReconfigureMessage"
+	case DHCPv6OptReconfigureAccept:
+		return "ReconfigureAccept"
+	case DHCPv6OptSIPServersDomainList:
+		return "SIPServersDomainList"
+	case DHCPv6OptSIPServersAddressList:
+		return "SIPServersAddressList"
+	case DHCPv6OptDNSServers:
+		return "DNSRecursiveNameServer"
+	case DHCPv6OptDomainList:
+		return "DomainSearchList"
+	case DHCPv6OptIAPD:
+		return "IdentityAssociationPrefixDelegation"
+	case DHCPv6OptIAPrefix:
+		return "IAPDPrefix"
+	case DHCPv6OptNISServers:
+		return "NISServers"
+	case DHCPv6OptNISPServers:
+		return "NISv2Servers"
+	case DHCPv6OptNISDomainName:
+		return "NISDomainName"
+	case DHCPv6OptNISPDomainName:
+		return "NISv2DomainName"
+	case DHCPv6OptSNTPServers:
+		return "SNTPServers"
+	case DHCPv6OptInformationRefreshTime:
+		return "InformationRefreshTime"
+	case DHCPv6OptBCMCSServerDomainNameList:
+		return "BCMCSControlServersDomainNameList"
+	case DHCPv6OptBCMCSServerAddressList:
+		return "BCMCSControlServersAddressList"
+	case DHCPv6OptGeoconfCivic:
+		return "CivicAddress"
+	case DHCPv6OptRemoteID:
+		return "RelayAgentRemoteID"
+	case DHCPv6OptSubscriberID:
+		return "RelayAgentSubscriberID"
+	case DHCPv6OptClientFQDN:
+		return "ClientFQDN"
+	case DHCPv6OptPanaAgent:
+		return "PANAAuthenticationAgent"
+	case DHCPv6OptNewPOSIXTimezone:
+		return "NewPOSIXTimezone"
+	case DHCPv6OptNewTZDBTimezone:
+		return "NewTZDBTimezone"
+	case DHCPv6OptEchoRequestOption:
+		return "EchoRequest"
+	case DHCPv6OptLQQuery:
+		return "LeasequeryQuery"
+	case DHCPv6OptClientData:
+		return "LeasequeryClientData"
+	case DHCPv6OptCLTTime:
+		return "LeasequeryClientLastTransactionTime"
+	case DHCPv6OptLQRelayData:
+		return "LeasequeryRelayData"
+	case DHCPv6OptLQClientLink:
+		return "LeasequeryClientLink"
+	case DHCPv6OptMIP6HNIDF:
+		return "MIPv6HomeNetworkIDFQDN"
+	case DHCPv6OptMIP6VDINF:
+		return "MIPv6VisitedHomeNetworkInformation"
+	case DHCPv6OptMIP6IDINF:
+		return "MIPv6IdentifiedHomeNetworkInformation"
+	case DHCPv6OptMIP6UDINF:
+		return "MIPv6UnrestrictedHomeNetworkInformation"
+	case DHCPv6OptMIP6HNP:
+		return "MIPv6HomeNetworkPrefix"
+	case DHCPv6OptMIP6HAA:
+		return "MIPv6HomeAgentAddress"
+	case DHCPv6OptMIP6HAF:
+		return "MIPv6HomeAgentFQDN"
+	case DHCPv6OptV6LOST:
+		return "LoST Server"
+	case DHCPv6OptCAPWAPACV6:
+		return "CAPWAPAccessControllerV6"
+	case DHCPv6OptRelayID:
+		return "LeasequeryRelayID"
+	case DHCPv6OptIPv6AddressMoS:
+		return "MoSIPv6Address"
+	case DHCPv6OptIPv6FQDNMoS:
+		return "MoSDomainNameList"
+	case DHCPv6OptNTPServer:
+		return "NTPServer"
+	case DHCPv6OptV6AccessDomain:
+		return "AccessNetworkDomainName"
+	case DHCPv6OptSIPUACSList:
+		return "SIPUserAgentConfigurationServiceDomains"
+	case DHCPv6OptBootFileURL:
+		return "BootFileURL"
+	case DHCPv6OptBootFileParam:
+		return "BootFileParameters"
+	case DHCPv6OptClientArchType:
+		return "ClientSystemArchitectureType"
+	case DHCPv6OptNII:
+		return "ClientNetworkInterfaceIdentifier"
+	case DHCPv6OptGeolocation:
+		return "Geolocation"
+	case DHCPv6OptAFTRName:
+		return "AFTRName"
+	case DHCPv6OptERPLocalDomainName:
+		return "ERPLocalDomainName"
+	case DHCPv6OptRSOO:
+		return "RSOOption"
+	case DHCPv6OptPDExclude:
+		return "PrefixExclude"
+	case DHCPv6OptVSS:
+		return "VirtualSubnetSelection"
+	case DHCPv6OptRDNSSSelection:
+		return "RDNSSSelection"
+	case DHCPv6OptKRBPrincipalName:
+		return "KerberosPrincipalName"
+	case DHCPv6OptKRBRealmName:
+		return "KerberosRealmName"
+	case DHCPv6OptKRBKDC:
+		return "KerberosKDC"
+	case DHCPv6OptClientLinkLayerAddress:
+		return "ClientLinkLayerAddress"
+	case DHCPv6OptLinkAddress:
+		return "LinkAddress"
+	case DHCPv6OptRADIUS:
+		return "RADIUS"
+	case DHCPv6OptSolMaxRt:
+		return "SolMaxRt"
+	case DHCPv6OptInfMaxRt:
+		return "InfMaxRt"
+	case DHCPv6OptAddrSel:
+		return "AddressSelection"
+	case DHCPv6OptAddrSelTable:
+		return "AddressSelectionTable"
+	case DHCPv6OptV6PCPServer:
+		return "PCPServer"
+	case DHCPv6OptDHCPv4Message:
+		return "DHCPv4Message"
+	case DHCPv6OptDHCPv4OverDHCPv6Server:
+		return "DHCP4o6ServerAddress"
+	case DHCPv6OptS46Rule:
+		return "S46Rule"
+	case DHCPv6OptS46BR:
+		return "S46BR"
+	case DHCPv6OptS46DMR:
+		return "S46DMR"
+	case DHCPv6OptS46V4V4Bind:
+		return "S46IPv4IPv6AddressBinding"
+	case DHCPv6OptS46PortParameters:
+		return "S46PortParameters"
+	case DHCPv6OptS46ContMAPE:
+		return "S46MAPEContainer"
+	case DHCPv6OptS46ContMAPT:
+		return "S46MAPTContainer"
+	case DHCPv6OptS46ContLW:
+		return "S46Lightweight4Over6Container"
+	case DHCPv6Opt4RD:
+		return "4RD"
+	case DHCPv6Opt4RDMapRule:
+		return "4RDMapRule"
+	case DHCPv6Opt4RDNonMapRule:
+		return "4RDNonMapRule"
+	case DHCPv6OptLQBaseTime:
+		return "LQBaseTime"
+	case DHCPv6OptLQStartTime:
+		return "LQStartTime"
+	case DHCPv6OptLQEndTime:
+		return "LQEndTime"
+	case DHCPv6OptCaptivePortal:
+		return "CaptivePortal"
+	case DHCPv6OptMPLParameters:
+		return "MPLParameterConfiguration"
+	case DHCPv6OptANIATT:
+		return "ANIAccessTechnologyType"
+	case DHCPv6OptANINetworkName:
+		return "ANINetworkName"
+	case DHCPv6OptANIAPName:
+		return "ANIAccessPointName"
+	case DHCPv6OptANIAPBSSID:
+		return "ANIAccessPointBSSID"
+	case DHCPv6OptANIOperatorID:
+		return "ANIOperatorIdentifier"
+	case DHCPv6OptANIOperatorRealm:
+		return "ANIOperatorRealm"
+	case DHCPv6OptS46Priority:
+		return "S64Priority"
+	case DHCPv6OptMUDURLV6:
+		return "ManufacturerUsageDescriptionURL"
+	case DHCPv6OptV6Prefix64:
+		return "V6Prefix64"
+	case DHCPv6OptFBindingStatus:
+		return "FailoverBindingStatus"
+	case DHCPv6OptFConnectFlags:
+		return "FailoverConnectFlags"
+	case DHCPv6OptFDNSRemovalInfo:
+		return "FailoverDNSRemovalInfo"
+	case DHCPv6OptFDNSHostName:
+		return "FailoverDNSHostName"
+	case DHCPv6OptFDNSZoneName:
+		return "FailoverDNSZoneName"
+	case DHCPv6OptFDNSFlags:
+		return "FailoverDNSFlags"
+	case DHCPv6OptFExpirationTime:
+		return "FailoverExpirationTime"
+	case DHCPv6OptFMaxUnacknowledgedBNDUPD:
+		return "FailoverMaxUnacknowledgedBNDUPDMessages"
+	case DHCPv6OptFMCLT:
+		return "FailoverMaximumClientLeadTime"
+	case DHCPv6OptFPartnerLifetime:
+		return "FailoverPartnerLifetime"
+	case DHCPv6OptFPartnerLifetimeSent:
+		return "FailoverPartnerLifetimeSent"
+	case DHCPv6OptFPartnerDownTime:
+		return "FailoverPartnerDownTime"
+	case DHCPv6OptFPartnerRawCltTime:
+		return "FailoverPartnerRawClientLeadTime"
+	case DHCPv6OptFProtocolVersion:
+		return "FailoverProtocolVersion"
+	case DHCPv6OptFKeepaliveTime:
+		return "FailoverKeepaliveTime"
+	case DHCPv6OptFReconfigureData:
+		return "FailoverReconfigureData"
+	case DHCPv6OptFRelationshipName:
+		return "FailoverRelationshipName"
+	case DHCPv6OptFServerFlags:
+		return "FailoverServerFlags"
+	case DHCPv6OptFServerState:
+		return "FailoverServerState"
+	case DHCPv6OptFStartTimeOfState:
+		return "FailoverStartTimeOfState"
+	case DHCPv6OptFStateExpirationTime:
+		return "FailoverStateExpirationTime"
+	case DHCPv6OptRelayPort:
+		return "RelayPort"
+	case DHCPv6OptV6ZeroTouchRedirect:
+		return "ZeroTouch"
+	case DHCPv6OptIPV6AddressANDSF:
+		return "ANDSFIPv6Address"
+	default:
+		return fmt.Sprintf("Unknown(%d)", uint16(o))
+	}
+}
+
+// DHCPv6Options is used to get nicely printed option lists which would
+// normally be cut off after 5 options by the default slice formatting.
+type DHCPv6Options []DHCPv6Option
+
+// String returns a string version of the options list.
+func (o DHCPv6Options) String() string {
+ buf := &bytes.Buffer{}
+ buf.WriteByte('[')
+ for i, opt := range o {
+ buf.WriteString(opt.String())
+ if i+1 != len(o) {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// DHCPv6Option represents a single DHCPv6 option as a (code, length, value)
+// triple.
+type DHCPv6Option struct {
+	Code   DHCPv6Opt
+	Length uint16 // number of bytes in Data
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option: ClientID/ServerID are
+// rendered as decoded DUIDs, Oro as the list of requested option names,
+// and everything else as raw bytes.
+func (o DHCPv6Option) String() string {
+	switch o.Code {
+	case DHCPv6OptClientID, DHCPv6OptServerID:
+		duid, err := decodeDHCPv6DUID(o.Data)
+		if err != nil {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Code)
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String())
+	case DHCPv6OptOro:
+		// Payload is a sequence of 2-byte option codes.
+		// NOTE(review): iterates o.Length bytes of o.Data; decode()
+		// guarantees len(o.Data) matches o.Length, but a hand-built
+		// option with Length > len(Data) would panic here — confirm.
+		options := ""
+		for i := 0; i < int(o.Length); i += 2 {
+			if options != "" {
+				options += ","
+			}
+			option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2]))
+			options += option.String()
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, options)
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data)
+	}
+}
+
+// NewDHCPv6Option constructs a new DHCPv6Option with a given code and data.
+// A nil data slice yields an option with zero Length and no payload.
+func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option {
+	opt := DHCPv6Option{Code: code}
+	if data == nil {
+		return opt
+	}
+	opt.Data = data
+	opt.Length = uint16(len(data))
+	return opt
+}
+
+// encode writes the option into b as a 2-byte big-endian code, a 2-byte
+// length, then the raw payload. When opts.FixLengths is set the length
+// field is recomputed from len(o.Data) instead of trusting o.Length.
+// NOTE(review): assumes b has room for 4+len(o.Data) bytes — callers size
+// the buffer from Len(), which uses o.Length; confirm the two agree when
+// FixLengths is false.
+func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error {
+	binary.BigEndian.PutUint16(b[0:2], uint16(o.Code))
+	if opts.FixLengths {
+		binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data)))
+	} else {
+		binary.BigEndian.PutUint16(b[2:4], o.Length)
+	}
+	copy(b[4:], o.Data)
+
+	return nil
+}
+
+// decode parses one option from the front of data: a 2-byte big-endian
+// code, a 2-byte length, then the payload. The previous version only
+// required 3 bytes before reading the 2-byte length at offset 2, and never
+// verified the declared length against the remaining bytes, so a truncated
+// option paniced; both slices are now bounds-checked.
+func (o *DHCPv6Option) decode(data []byte) error {
+	if len(data) < 4 {
+		// An option header is 4 bytes: 2 for code, 2 for length.
+		return errors.New("not enough data to decode")
+	}
+	o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2]))
+	o.Length = binary.BigEndian.Uint16(data[2:4])
+	if len(data) < 4+int(o.Length) {
+		return errors.New("not enough data to decode")
+	}
+	o.Data = data[4 : 4+o.Length]
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go
new file mode 100644
index 0000000..a0b2d72
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dns.go
@@ -0,0 +1,1053 @@
+// Copyright 2014, 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
// DNSClass defines the class associated with a request/response. Different DNS
// classes can be thought of as an array of parallel namespace trees.
type DNSClass uint16

// DNSClass known values.
const (
	DNSClassIN  DNSClass = 1   // Internet
	DNSClassCS  DNSClass = 2   // the CSNET class (Obsolete)
	DNSClassCH  DNSClass = 3   // the CHAOS class
	DNSClassHS  DNSClass = 4   // Hesiod [Dyer 87]
	DNSClassAny DNSClass = 255 // AnyClass
)

// dnsClassNames maps known DNSClass values to their display mnemonics.
var dnsClassNames = map[DNSClass]string{
	DNSClassIN:  "IN",
	DNSClassCS:  "CS",
	DNSClassCH:  "CH",
	DNSClassHS:  "HS",
	DNSClassAny: "Any",
}

// String returns the mnemonic for a known class, or "Unknown" otherwise.
func (dc DNSClass) String() string {
	if name, ok := dnsClassNames[dc]; ok {
		return name
	}
	return "Unknown"
}
+
// DNSType defines the type of data being requested/returned in a
// question/answer.
type DNSType uint16

// DNSType known values.
const (
	DNSTypeA     DNSType = 1  // a host address
	DNSTypeNS    DNSType = 2  // an authoritative name server
	DNSTypeMD    DNSType = 3  // a mail destination (Obsolete - use MX)
	DNSTypeMF    DNSType = 4  // a mail forwarder (Obsolete - use MX)
	DNSTypeCNAME DNSType = 5  // the canonical name for an alias
	DNSTypeSOA   DNSType = 6  // marks the start of a zone of authority
	DNSTypeMB    DNSType = 7  // a mailbox domain name (EXPERIMENTAL)
	DNSTypeMG    DNSType = 8  // a mail group member (EXPERIMENTAL)
	DNSTypeMR    DNSType = 9  // a mail rename domain name (EXPERIMENTAL)
	DNSTypeNULL  DNSType = 10 // a null RR (EXPERIMENTAL)
	DNSTypeWKS   DNSType = 11 // a well known service description
	DNSTypePTR   DNSType = 12 // a domain name pointer
	DNSTypeHINFO DNSType = 13 // host information
	DNSTypeMINFO DNSType = 14 // mailbox or mail list information
	DNSTypeMX    DNSType = 15 // mail exchange
	DNSTypeTXT   DNSType = 16 // text strings
	DNSTypeAAAA  DNSType = 28 // a IPv6 host address [RFC3596]
	DNSTypeSRV   DNSType = 33 // server discovery [RFC2782] [RFC6195]
	DNSTypeOPT   DNSType = 41 // OPT Pseudo-RR [RFC6891]
)

// dnsTypeNames maps known DNSType values to their display mnemonics.
var dnsTypeNames = map[DNSType]string{
	DNSTypeA:     "A",
	DNSTypeNS:    "NS",
	DNSTypeMD:    "MD",
	DNSTypeMF:    "MF",
	DNSTypeCNAME: "CNAME",
	DNSTypeSOA:   "SOA",
	DNSTypeMB:    "MB",
	DNSTypeMG:    "MG",
	DNSTypeMR:    "MR",
	DNSTypeNULL:  "NULL",
	DNSTypeWKS:   "WKS",
	DNSTypePTR:   "PTR",
	DNSTypeHINFO: "HINFO",
	DNSTypeMINFO: "MINFO",
	DNSTypeMX:    "MX",
	DNSTypeTXT:   "TXT",
	DNSTypeAAAA:  "AAAA",
	DNSTypeSRV:   "SRV",
	DNSTypeOPT:   "OPT",
}

// String returns the mnemonic for a known type, or "Unknown" otherwise.
func (dt DNSType) String() string {
	if name, ok := dnsTypeNames[dt]; ok {
		return name
	}
	return "Unknown"
}
+
// DNSResponseCode provides response codes for question answers.
type DNSResponseCode uint8

// DNSResponseCode known values.
const (
	DNSResponseCodeNoErr    DNSResponseCode = 0  // No error
	DNSResponseCodeFormErr  DNSResponseCode = 1  // Format Error                       [RFC1035]
	DNSResponseCodeServFail DNSResponseCode = 2  // Server Failure                     [RFC1035]
	DNSResponseCodeNXDomain DNSResponseCode = 3  // Non-Existent Domain                [RFC1035]
	DNSResponseCodeNotImp   DNSResponseCode = 4  // Not Implemented                    [RFC1035]
	DNSResponseCodeRefused  DNSResponseCode = 5  // Query Refused                      [RFC1035]
	DNSResponseCodeYXDomain DNSResponseCode = 6  // Name Exists when it should not     [RFC2136]
	DNSResponseCodeYXRRSet  DNSResponseCode = 7  // RR Set Exists when it should not   [RFC2136]
	DNSResponseCodeNXRRSet  DNSResponseCode = 8  // RR Set that should exist does not  [RFC2136]
	DNSResponseCodeNotAuth  DNSResponseCode = 9  // Server Not Authoritative for zone  [RFC2136]
	DNSResponseCodeNotZone  DNSResponseCode = 10 // Name not contained in zone         [RFC2136]
	DNSResponseCodeBadVers  DNSResponseCode = 16 // Bad OPT Version                    [RFC2671]
	DNSResponseCodeBadSig   DNSResponseCode = 16 // TSIG Signature Failure             [RFC2845]
	DNSResponseCodeBadKey   DNSResponseCode = 17 // Key not recognized                 [RFC2845]
	DNSResponseCodeBadTime  DNSResponseCode = 18 // Signature out of time window       [RFC2845]
	DNSResponseCodeBadMode  DNSResponseCode = 19 // Bad TKEY Mode                      [RFC2930]
	DNSResponseCodeBadName  DNSResponseCode = 20 // Duplicate key name                 [RFC2930]
	DNSResponseCodeBadAlg   DNSResponseCode = 21 // Algorithm not supported            [RFC2930]
	DNSResponseCodeBadTruc  DNSResponseCode = 22 // Bad Truncation                     [RFC4635]
)

// dnsResponseCodeNames maps known response codes to their descriptions.
// BadVers and BadSig share value 16; value 16 renders as "Bad OPT Version",
// matching the historical switch behavior.
var dnsResponseCodeNames = map[DNSResponseCode]string{
	DNSResponseCodeNoErr:    "No Error",
	DNSResponseCodeFormErr:  "Format Error",
	DNSResponseCodeServFail: "Server Failure ",
	DNSResponseCodeNXDomain: "Non-Existent Domain",
	DNSResponseCodeNotImp:   "Not Implemented",
	DNSResponseCodeRefused:  "Query Refused",
	DNSResponseCodeYXDomain: "Name Exists when it should not",
	DNSResponseCodeYXRRSet:  "RR Set Exists when it should not",
	DNSResponseCodeNXRRSet:  "RR Set that should exist does not",
	DNSResponseCodeNotAuth:  "Server Not Authoritative for zone",
	DNSResponseCodeNotZone:  "Name not contained in zone",
	DNSResponseCodeBadVers:  "Bad OPT Version",
	DNSResponseCodeBadKey:   "Key not recognized",
	DNSResponseCodeBadTime:  "Signature out of time window",
	DNSResponseCodeBadMode:  "Bad TKEY Mode",
	DNSResponseCodeBadName:  "Duplicate key name",
	DNSResponseCodeBadAlg:   "Algorithm not supported",
	DNSResponseCodeBadTruc:  "Bad Truncation",
}

// String returns the description for a known code, or "Unknown" otherwise.
func (drc DNSResponseCode) String() string {
	if name, ok := dnsResponseCodeNames[drc]; ok {
		return name
	}
	return "Unknown"
}
+
// DNSOpCode defines a set of different operation types.
type DNSOpCode uint8

// DNSOpCode known values.
const (
	DNSOpCodeQuery  DNSOpCode = 0 // Query                  [RFC1035]
	DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425]
	DNSOpCodeStatus DNSOpCode = 2 // Status                 [RFC1035]
	DNSOpCodeNotify DNSOpCode = 4 // Notify                 [RFC1996]
	DNSOpCodeUpdate DNSOpCode = 5 // Update                 [RFC2136]
)

// dnsOpCodeNames maps known op codes to their display names.
var dnsOpCodeNames = map[DNSOpCode]string{
	DNSOpCodeQuery:  "Query",
	DNSOpCodeIQuery: "Inverse Query",
	DNSOpCodeStatus: "Status",
	DNSOpCodeNotify: "Notify",
	DNSOpCodeUpdate: "Update",
}

// String returns the name of a known op code, or "Unknown" otherwise.
func (doc DNSOpCode) String() string {
	if name, ok := dnsOpCodeNames[doc]; ok {
		return name
	}
	return "Unknown"
}
+
+// DNS is specified in RFC 1034 / RFC 1035
+// +---------------------+
+// | Header |
+// +---------------------+
+// | Question | the question for the name server
+// +---------------------+
+// | Answer | RRs answering the question
+// +---------------------+
+// | Authority | RRs pointing toward an authority
+// +---------------------+
+// | Additional | RRs holding additional information
+// +---------------------+
+//
+// DNS Header
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ID |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | QDCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ANCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | NSCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ARCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
// DNS contains data from a single Domain Name Service packet.
type DNS struct {
	BaseLayer

	// Header fields
	ID     uint16    // transaction identifier, echoed in the response
	QR     bool      // true for a response, false for a query
	OpCode DNSOpCode // kind of query (Query, Status, Update, ...)

	AA bool  // Authoritative answer
	TC bool  // Truncated
	RD bool  // Recursion desired
	RA bool  // Recursion available
	Z  uint8 // Reserved for future use

	ResponseCode DNSResponseCode
	QDCount      uint16 // Number of questions to expect
	ANCount      uint16 // Number of answers to expect
	NSCount      uint16 // Number of authorities to expect
	ARCount      uint16 // Number of additional records to expect

	// Entries
	Questions   []DNSQuestion
	Answers     []DNSResourceRecord
	Authorities []DNSResourceRecord
	Additionals []DNSResourceRecord

	// buffer for doing name decoding.  We use a single reusable buffer to avoid
	// name decoding on a single object via multiple DecodeFromBytes calls
	// requiring constant allocation of small byte slices.
	buffer []byte
}
+
// LayerType returns gopacket.LayerTypeDNS, identifying this layer to gopacket.
func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS }
+
+// decodeDNS decodes the byte slice into a DNS type. It also
+// setups the application Layer in PacketBuilder.
+func decodeDNS(data []byte, p gopacket.PacketBuilder) error {
+ d := &DNS{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+ return nil
+}
+
// DecodeFromBytes decodes the slice into the DNS struct.
func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	// Reset the shared name-decoding buffer; names decoded below are
	// sub-slices of it, so it must not be reused across objects.
	d.buffer = d.buffer[:0]

	// The fixed DNS header is 12 bytes.
	if len(data) < 12 {
		df.SetTruncated()
		return errDNSPacketTooShort
	}

	// since there are no further layers, the baselayer's content is
	// pointing to this layer
	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
	// Unpack the header flags per RFC 1035 section 4.1.1.
	d.ID = binary.BigEndian.Uint16(data[:2])
	d.QR = data[2]&0x80 != 0
	d.OpCode = DNSOpCode(data[2]>>3) & 0x0F
	d.AA = data[2]&0x04 != 0
	d.TC = data[2]&0x02 != 0
	d.RD = data[2]&0x01 != 0
	d.RA = data[3]&0x80 != 0
	d.Z = uint8(data[3]>>4) & 0x7
	d.ResponseCode = DNSResponseCode(data[3] & 0xF)
	d.QDCount = binary.BigEndian.Uint16(data[4:6])
	d.ANCount = binary.BigEndian.Uint16(data[6:8])
	d.NSCount = binary.BigEndian.Uint16(data[8:10])
	d.ARCount = binary.BigEndian.Uint16(data[10:12])

	// Truncate (but keep capacity of) the entry slices so repeated decodes
	// reuse their backing arrays.
	d.Questions = d.Questions[:0]
	d.Answers = d.Answers[:0]
	d.Authorities = d.Authorities[:0]
	d.Additionals = d.Additionals[:0]

	offset := 12
	var err error
	for i := 0; i < int(d.QDCount); i++ {
		var q DNSQuestion
		if offset, err = q.decode(data, offset, df, &d.buffer); err != nil {
			return err
		}
		d.Questions = append(d.Questions, q)
	}

	// For some horrible reason, if we do the obvious thing in this loop:
	//   var r DNSResourceRecord
	//   if blah := r.decode(blah); err != nil {
	//     return err
	//   }
	//   d.Foo = append(d.Foo, r)
	// the Go compiler thinks that 'r' escapes to the heap, causing a malloc for
	// every Answer, Authority, and Additional.  To get around this, we do
	// something really silly:  we append an empty resource record to our slice,
	// then use the last value in the slice to call decode.  Since the value is
	// already in the slice, there's no WAY it can escape... on the other hand our
	// code is MUCH uglier :(
	for i := 0; i < int(d.ANCount); i++ {
		d.Answers = append(d.Answers, DNSResourceRecord{})
		if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil {
			d.Answers = d.Answers[:i] // strip off erroneous value
			return err
		}
	}
	for i := 0; i < int(d.NSCount); i++ {
		d.Authorities = append(d.Authorities, DNSResourceRecord{})
		if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil {
			d.Authorities = d.Authorities[:i] // strip off erroneous value
			return err
		}
	}
	for i := 0; i < int(d.ARCount); i++ {
		d.Additionals = append(d.Additionals, DNSResourceRecord{})
		if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil {
			d.Additionals = d.Additionals[:i] // strip off erroneous value
			return err
		}
	}

	// Sanity check: the decoded entry counts must match the header counts.
	if uint16(len(d.Questions)) != d.QDCount {
		return errDecodeQueryBadQDCount
	} else if uint16(len(d.Answers)) != d.ANCount {
		return errDecodeQueryBadANCount
	} else if uint16(len(d.Authorities)) != d.NSCount {
		return errDecodeQueryBadNSCount
	} else if uint16(len(d.Additionals)) != d.ARCount {
		return errDecodeQueryBadARCount
	}
	return nil
}
+
// CanDecode implements gopacket.DecodingLayer: this layer decodes
// LayerTypeDNS packets.
func (d *DNS) CanDecode() gopacket.LayerClass {
	return LayerTypeDNS
}
+
// NextLayerType implements gopacket.DecodingLayer: nothing follows DNS,
// so any remainder is plain payload.
func (d *DNS) NextLayerType() gopacket.LayerType {
	return gopacket.LayerTypePayload
}
+
// Payload returns nil; DNS consumes the entire remaining packet.
func (d *DNS) Payload() []byte {
	return nil
}
+
// b2i converts a bool into an int bit: 1 for true, 0 for false.
func b2i(b bool) int {
	v := 0
	if b {
		v = 1
	}
	return v
}
+
+func recSize(rr *DNSResourceRecord) int {
+ switch rr.Type {
+ case DNSTypeA:
+ return 4
+ case DNSTypeAAAA:
+ return 16
+ case DNSTypeNS:
+ return len(rr.NS) + 2
+ case DNSTypeCNAME:
+ return len(rr.CNAME) + 2
+ case DNSTypePTR:
+ return len(rr.PTR) + 2
+ case DNSTypeSOA:
+ return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20
+ case DNSTypeMX:
+ return 2 + len(rr.MX.Name) + 2
+ case DNSTypeTXT:
+ l := len(rr.TXTs)
+ for _, txt := range rr.TXTs {
+ l += len(txt)
+ }
+ return l
+ case DNSTypeSRV:
+ return 6 + len(rr.SRV.Name) + 2
+ case DNSTypeOPT:
+ l := len(rr.OPT) * 4
+ for _, opt := range rr.OPT {
+ l += len(opt.Data)
+ }
+ return l
+ }
+
+ return 0
+}
+
+func computeSize(recs []DNSResourceRecord) int {
+ sz := 0
+ for _, rr := range recs {
+ v := len(rr.Name)
+
+ if v == 0 {
+ sz += v + 11
+ } else {
+ sz += v + 12
+ }
+
+ sz += recSize(&rr)
+ }
+ return sz
+}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// Pre-compute the total payload size so a single PrependBytes suffices.
	dsz := 0
	for _, q := range d.Questions {
		// encoded name (len+2) plus 2-byte type and 2-byte class.
		dsz += len(q.Name) + 6
	}
	dsz += computeSize(d.Answers)
	dsz += computeSize(d.Authorities)
	dsz += computeSize(d.Additionals)

	bytes, err := b.PrependBytes(12 + dsz)
	if err != nil {
		return err
	}
	// Pack the header flag bytes per RFC 1035 section 4.1.1.
	binary.BigEndian.PutUint16(bytes, d.ID)
	bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD))
	bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode))

	if opts.FixLengths {
		d.QDCount = uint16(len(d.Questions))
		d.ANCount = uint16(len(d.Answers))
		d.NSCount = uint16(len(d.Authorities))
		d.ARCount = uint16(len(d.Additionals))
	}
	binary.BigEndian.PutUint16(bytes[4:], d.QDCount)
	binary.BigEndian.PutUint16(bytes[6:], d.ANCount)
	binary.BigEndian.PutUint16(bytes[8:], d.NSCount)
	binary.BigEndian.PutUint16(bytes[10:], d.ARCount)

	off := 12
	for _, qd := range d.Questions {
		n := qd.encode(bytes, off)
		off += n
	}

	for i := range d.Answers {
		// done this way so we can modify DNSResourceRecord to fix
		// lengths if requested
		qa := &d.Answers[i]
		n, err := qa.encode(bytes, off, opts)
		if err != nil {
			return err
		}
		off += n
	}

	for i := range d.Authorities {
		qa := &d.Authorities[i]
		n, err := qa.encode(bytes, off, opts)
		if err != nil {
			return err
		}
		off += n
	}
	for i := range d.Additionals {
		qa := &d.Additionals[i]
		n, err := qa.encode(bytes, off, opts)
		if err != nil {
			return err
		}
		off += n
	}

	return nil
}
+
// maxRecursionLevel bounds pointer-chasing in decodeName so maliciously
// looped compression pointers cannot recurse forever.
const maxRecursionLevel = 255

// decodeName decodes a (possibly compressed) DNS name starting at
// data[offset]. The dotted form is appended to *buffer, and a sub-slice of
// *buffer holding the name is returned along with the offset just past the
// name in data.
func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) {
	if level > maxRecursionLevel {
		return nil, 0, errMaxRecursion
	} else if offset >= len(data) {
		return nil, 0, errDNSNameOffsetTooHigh
	} else if offset < 0 {
		return nil, 0, errDNSNameOffsetNegative
	}
	start := len(*buffer)
	index := offset
	// The root name "." is a lone zero octet and decodes to a nil slice.
	if data[index] == 0x00 {
		return nil, index + 1, nil
	}
loop:
	for data[index] != 0x00 {
		// The top two bits of the length octet select between a plain
		// label (00) and a compression pointer (11).
		switch data[index] & 0xc0 {
		default:
			/* RFC 1035
			   A domain name represented as a sequence of labels, where
			   each label consists of a length octet followed by that
			   number of octets.  The domain name terminates with the
			   zero length octet for the null label of the root.  Note
			   that this field may be an odd number of octets; no
			   padding is used.
			*/
			index2 := index + int(data[index]) + 1
			if index2-offset > 255 {
				return nil, 0, errDNSNameTooLong
			} else if index2 < index+1 || index2 > len(data) {
				return nil, 0, errDNSNameInvalidIndex
			}
			*buffer = append(*buffer, '.')
			*buffer = append(*buffer, data[index+1:index2]...)
			index = index2

		case 0xc0:
			/* RFC 1035
			   The pointer takes the form of a two octet sequence.

			   The first two bits are ones.  This allows a pointer to
			   be distinguished from a label, since the label must
			   begin with two zero bits because labels are restricted
			   to 63 octets or less.  (The 10 and 01 combinations are
			   reserved for future use.)  The OFFSET field specifies
			   an offset from the start of the message (i.e., the
			   first octet of the ID field in the domain header).  A
			   zero offset specifies the first byte of the ID field,
			   etc.

			   The compression scheme allows a domain name in a message to be
			   represented as either:
			      - a sequence of labels ending in a zero octet
			      - a pointer
			      - a sequence of labels ending with a pointer
			*/
			if index+2 > len(data) {
				return nil, 0, errDNSPointerOffsetTooHigh
			}
			offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff)
			if offsetp > len(data) {
				return nil, 0, errDNSPointerOffsetTooHigh
			}
			// This looks a little tricky, but actually isn't.  Because of how
			// decodeName is written, calling it appends the decoded name to the
			// current buffer.  We already have the start of the buffer, then, so
			// once this call is done buffer[start:] will contain our full name.
			_, _, err := decodeName(data, offsetp, buffer, level+1)
			if err != nil {
				return nil, 0, err
			}
			index++ // pointer is two bytes, so add an extra byte here.
			break loop
		/* EDNS, or other DNS option ? */
		case 0x40: // RFC 2673
			return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)",
				data[index], index)

		case 0x80:
			return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)",
				data[index], index)
		}
		if index >= len(data) {
			return nil, 0, errDNSIndexOutOfRange
		}
	}
	// Labels were appended with a leading '.', so skip it unless nothing
	// was appended at all.
	if len(*buffer) <= start {
		return (*buffer)[start:], index + 1, nil
	}
	return (*buffer)[start+1:], index + 1, nil
}
+
// DNSQuestion wraps a single request (question) within a DNS query.
type DNSQuestion struct {
	Name  []byte   // queried domain name in dotted form
	Type  DNSType  // requested record type
	Class DNSClass // requested class (usually DNSClassIN)
}
+
+func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+ name, endq, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return 0, err
+ }
+
+ q.Name = name
+ q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+ q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+
+ return endq + 4, nil
+}
+
+func (q *DNSQuestion) encode(data []byte, offset int) int {
+ noff := encodeName(q.Name, data, offset)
+ nSz := noff - offset
+ binary.BigEndian.PutUint16(data[noff:], uint16(q.Type))
+ binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class))
+ return nSz + 4
+}
+
+// DNSResourceRecord
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | |
+// / /
+// / NAME /
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TYPE |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | CLASS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TTL |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | RDLENGTH |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+// / RDATA /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
// DNSResourceRecord wraps the data from a single DNS resource within a
// response.
type DNSResourceRecord struct {
	// Header
	Name  []byte   // record owner name in dotted form
	Type  DNSType  // record type, selects which decoded field below is set
	Class DNSClass
	TTL   uint32

	// RDATA Raw Values
	DataLength uint16 // length of Data as carried on the wire
	Data       []byte // raw RDATA bytes

	// RDATA Decoded Values — only the field matching Type is populated.
	IP             net.IP
	NS, CNAME, PTR []byte
	TXTs           [][]byte
	SOA            DNSSOA
	SRV            DNSSRV
	MX             DNSMX
	OPT            []DNSOPT // See RFC 6891, section 6.1.2

	// Undecoded TXT for backward compatibility
	TXT []byte
}
+
+// decode decodes the resource record, returning the total length of the record.
+func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+ name, endq, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return 0, err
+ }
+
+ rr.Name = name
+ rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+ rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+ rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+ rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10])
+ end := endq + 10 + int(rr.DataLength)
+ if end > len(data) {
+ return 0, errDecodeRecordLength
+ }
+ rr.Data = data[endq+10 : end]
+
+ if err = rr.decodeRData(data, endq+10, buffer); err != nil {
+ return 0, err
+ }
+
+ return endq + 10 + int(rr.DataLength), nil
+}
+
// encodeName writes name (dotted form) into data at offset using DNS wire
// label encoding — a length octet before each label, a zero terminator after
// the last — and returns the offset just past the terminator. data must have
// room for len(name)+2 bytes (1 byte for the empty name).
func encodeName(name []byte, data []byte, offset int) int {
	if len(name) == 0 {
		data[offset] = 0x00 // terminal
		return offset + 1
	}

	labelStart := 0 // index in name where the current label begins
	for i := 0; i <= len(name); i++ {
		if i == len(name) || name[i] == '.' {
			data[offset+labelStart] = byte(i - labelStart)
			copy(data[offset+labelStart+1:], name[labelStart:i])
			labelStart = i + 1
		}
	}

	data[offset+len(name)+1] = 0x00 // terminal
	return offset + len(name) + 2
}
+
// encode serializes the resource record at data[offset:] from its decoded
// fields (not from Data), returning the number of bytes written. Types not
// handled in the switch yield an error. With opts.FixLengths, DataLength is
// updated to the computed RDATA size.
func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) {

	noff := encodeName(rr.Name, data, offset)
	nSz := noff - offset

	binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type))
	binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class))
	binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL))

	// RDATA begins at noff+10 (2 type + 2 class + 4 TTL + 2 rdlength).
	switch rr.Type {
	case DNSTypeA:
		copy(data[noff+10:], rr.IP.To4())
	case DNSTypeAAAA:
		copy(data[noff+10:], rr.IP)
	case DNSTypeNS:
		encodeName(rr.NS, data, noff+10)
	case DNSTypeCNAME:
		encodeName(rr.CNAME, data, noff+10)
	case DNSTypePTR:
		encodeName(rr.PTR, data, noff+10)
	case DNSTypeSOA:
		noff2 := encodeName(rr.SOA.MName, data, noff+10)
		noff2 = encodeName(rr.SOA.RName, data, noff2)
		binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial)
		binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh)
		binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry)
		binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire)
		binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum)
	case DNSTypeMX:
		binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference)
		encodeName(rr.MX.Name, data, noff+12)
	case DNSTypeTXT:
		// Each string is a length octet followed by its bytes.
		noff2 := noff + 10
		for _, txt := range rr.TXTs {
			data[noff2] = byte(len(txt))
			copy(data[noff2+1:], txt)
			noff2 += 1 + len(txt)
		}
	case DNSTypeSRV:
		binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority)
		binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight)
		binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port)
		encodeName(rr.SRV.Name, data, noff+16)
	case DNSTypeOPT:
		// Each option: 2-byte code, 2-byte length, then its payload.
		noff2 := noff + 10
		for _, opt := range rr.OPT {
			binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code))
			binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data)))
			copy(data[noff2+4:], opt.Data)
			noff2 += 4 + len(opt.Data)
		}
	default:
		return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type)
	}

	// DataLength
	dSz := recSize(rr)
	binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz))

	if opts.FixLengths {
		rr.DataLength = uint16(dSz)
	}

	return nSz + 10 + dSz, nil
}
+
+func (rr *DNSResourceRecord) String() string {
+
+ if rr.Type == DNSTypeOPT {
+ opts := make([]string, len(rr.OPT))
+ for i, opt := range rr.OPT {
+ opts[i] = opt.String()
+ }
+ return "OPT " + strings.Join(opts, ",")
+ }
+ if rr.Class == DNSClassIN {
+ switch rr.Type {
+ case DNSTypeA, DNSTypeAAAA:
+ return rr.IP.String()
+ case DNSTypeNS:
+ return "NS " + string(rr.NS)
+ case DNSTypeCNAME:
+ return "CNAME " + string(rr.CNAME)
+ case DNSTypePTR:
+ return "PTR " + string(rr.PTR)
+ case DNSTypeTXT:
+ return "TXT " + string(rr.TXT)
+ }
+ }
+
+ return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type)
+}
+
+func decodeCharacterStrings(data []byte) ([][]byte, error) {
+ strings := make([][]byte, 0, 1)
+ end := len(data)
+ for index, index2 := 0, 0; index != end; index = index2 {
+ index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow
+ if index2 > end {
+ return nil, errCharStringMissData
+ }
+ strings = append(strings, data[index+1:index2])
+ }
+ return strings, nil
+}
+
+func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) {
+ allOPT := []DNSOPT{}
+ end := len(data)
+
+ if offset == end {
+ return allOPT, nil // There is no data to read
+ }
+
+ if offset+4 > end {
+ return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset)
+ }
+
+ for i := offset; i < end; {
+ opt := DNSOPT{}
+ opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2]))
+ l := binary.BigEndian.Uint16(data[i+2 : i+4])
+ if i+4+int(l) > end {
+ return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l)
+ }
+ opt.Data = data[i+4 : i+4+int(l)]
+ allOPT = append(allOPT, opt)
+ i += int(l) + 4
+ }
+ return allOPT, nil
+}
+
// decodeRData populates the typed RDATA fields (IP, NS, SOA, ...) from the
// raw record data. offset points at the start of RDATA within data; the full
// message is passed because name fields may contain compression pointers
// into earlier parts of the packet. Unknown types are left undecoded.
func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error {
	switch rr.Type {
	case DNSTypeA:
		// NOTE(review): assigns Data directly with no length validation —
		// a record whose RDATA is not 4 bytes still becomes rr.IP.
		rr.IP = rr.Data
	case DNSTypeAAAA:
		// NOTE(review): same as above; no 16-byte length check.
		rr.IP = rr.Data
	case DNSTypeTXT, DNSTypeHINFO:
		// Keep the raw bytes for backward compatibility, and also split
		// them into individual <character-string>s.
		rr.TXT = rr.Data
		txts, err := decodeCharacterStrings(rr.Data)
		if err != nil {
			return err
		}
		rr.TXTs = txts
	case DNSTypeNS:
		name, _, err := decodeName(data, offset, buffer, 1)
		if err != nil {
			return err
		}
		rr.NS = name
	case DNSTypeCNAME:
		name, _, err := decodeName(data, offset, buffer, 1)
		if err != nil {
			return err
		}
		rr.CNAME = name
	case DNSTypePTR:
		name, _, err := decodeName(data, offset, buffer, 1)
		if err != nil {
			return err
		}
		rr.PTR = name
	case DNSTypeSOA:
		name, endq, err := decodeName(data, offset, buffer, 1)
		if err != nil {
			return err
		}
		rr.SOA.MName = name
		name, endq, err = decodeName(data, endq, buffer, 1)
		if err != nil {
			return err
		}
		// NOTE(review): the five 32-bit fields below are read without a
		// bounds check against len(data) — TODO confirm upstream behavior.
		rr.SOA.RName = name
		rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4])
		rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8])
		rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12])
		rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16])
		rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20])
	case DNSTypeMX:
		rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2])
		name, _, err := decodeName(data, offset+2, buffer, 1)
		if err != nil {
			return err
		}
		rr.MX.Name = name
	case DNSTypeSRV:
		rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2])
		rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4])
		rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6])
		name, _, err := decodeName(data, offset+6, buffer, 1)
		if err != nil {
			return err
		}
		rr.SRV.Name = name
	case DNSTypeOPT:
		allOPT, err := decodeOPTs(data, offset)
		if err != nil {
			return err
		}
		rr.OPT = allOPT
	}
	return nil
}
+
// DNSSOA is a Start of Authority record.  Each domain requires a SOA record at
// the cutover where a domain is delegated from its parent.
type DNSSOA struct {
	MName, RName                            []byte // primary name server and responsible mailbox, dotted form
	Serial, Refresh, Retry, Expire, Minimum uint32 // zone timing parameters per RFC 1035
}
+
// DNSSRV is a Service record, defining a location (hostname/port) of a
// server/service.
type DNSSRV struct {
	Priority, Weight, Port uint16
	Name                   []byte // target host name in dotted form
}
+
// DNSMX is a mail exchange record, defining a mail server for a recipient's
// domain.
type DNSMX struct {
	Preference uint16 // lower values are preferred
	Name       []byte // mail server host name in dotted form
}
+
// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2
type DNSOptionCode uint16

// DNSOptionCode known values. See IANA
const (
	DNSOptionCodeNSID             DNSOptionCode = 3
	DNSOptionCodeDAU              DNSOptionCode = 5
	DNSOptionCodeDHU              DNSOptionCode = 6
	DNSOptionCodeN3U              DNSOptionCode = 7
	DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8
	DNSOptionCodeEDNSExpire       DNSOptionCode = 9
	DNSOptionCodeCookie           DNSOptionCode = 10
	DNSOptionCodeEDNSKeepAlive    DNSOptionCode = 11
	DNSOptionCodePadding          DNSOptionCode = 12
	DNSOptionCodeChain            DNSOptionCode = 13
	DNSOptionCodeEDNSKeyTag       DNSOptionCode = 14
	DNSOptionCodeEDNSClientTag    DNSOptionCode = 16
	DNSOptionCodeEDNSServerTag    DNSOptionCode = 17
	DNSOptionCodeDeviceID         DNSOptionCode = 26946
)

// dnsOptionCodeNames maps known option codes to their display names.
var dnsOptionCodeNames = map[DNSOptionCode]string{
	DNSOptionCodeNSID:             "NSID",
	DNSOptionCodeDAU:              "DAU",
	DNSOptionCodeDHU:              "DHU",
	DNSOptionCodeN3U:              "N3U",
	DNSOptionCodeEDNSClientSubnet: "EDNSClientSubnet",
	DNSOptionCodeEDNSExpire:       "EDNSExpire",
	DNSOptionCodeCookie:           "Cookie",
	DNSOptionCodeEDNSKeepAlive:    "EDNSKeepAlive",
	DNSOptionCodePadding:          "CodePadding",
	DNSOptionCodeChain:            "CodeChain",
	DNSOptionCodeEDNSKeyTag:       "CodeEDNSKeyTag",
	DNSOptionCodeEDNSClientTag:    "EDNSClientTag",
	DNSOptionCodeEDNSServerTag:    "EDNSServerTag",
	DNSOptionCodeDeviceID:         "DeviceID",
}

// String returns the name of a known option code, or "Unknown" otherwise.
func (doc DNSOptionCode) String() string {
	if name, ok := dnsOptionCodeNames[doc]; ok {
		return name
	}
	return "Unknown"
}
+
+// DNSOPT is a DNS Option, see RFC6891, section 6.1.2
+type DNSOPT struct {
+ Code DNSOptionCode
+ Data []byte
+}
+
+func (opt DNSOPT) String() string {
+ return fmt.Sprintf("%s=%x", opt.Code, opt.Data)
+}
+
// Sentinel errors returned by DNS decoding. The error strings are observable
// behavior and are kept unchanged.
var (
	errMaxRecursion = errors.New("max DNS recursion level hit")

	errDNSNameOffsetTooHigh    = errors.New("dns name offset too high")
	errDNSNameOffsetNegative   = errors.New("dns name offset is negative")
	errDNSPacketTooShort       = errors.New("DNS packet too short")
	errDNSNameTooLong          = errors.New("dns name is too long")
	errDNSNameInvalidIndex     = errors.New("dns name uncomputable: invalid index")
	errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high")
	errDNSIndexOutOfRange      = errors.New("dns index walked out of range")
	errDNSNameHasNoData        = errors.New("no dns data found for name")

	errCharStringMissData = errors.New("Insufficient data for a <character-string>")

	errDecodeRecordLength = errors.New("resource record length exceeds data")

	errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions")
	errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers")
	errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities")
	errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additionals info")
)
diff --git a/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/google/gopacket/layers/doc.go
new file mode 100644
index 0000000..3c882c3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/doc.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package layers provides decoding layers for many common protocols.
+
+The layers package contains decode implementations for a number of different
+types of packet layers. Users of gopacket will almost always want to also use
+layers to actually decode packet data into useful pieces. To see the set of
+protocols that gopacket/layers is currently able to decode,
+look at the set of LayerTypes defined in the Variables sections. The
+layers package also defines endpoints for many of the common packet layers
+that have source/destination addresses associated with them, for example IPv4/6
+(IPs) and TCP/UDP (ports).
+Finally, layers contains a number of useful enumerations (IPProtocol,
+EthernetType, LinkType, PPPType, etc...). Many of these implement the
+gopacket.Decoder interface, so they can be passed into gopacket as decoders.
+
+Most common protocol layers are named using acronyms or other industry-common
+names (IPv4, TCP, PPP). Some of the less common ones have their names expanded
+(CiscoDiscoveryProtocol).
+For certain protocols, sub-parts of the protocol are split out into their own
+layers (SCTP, for example). This is done mostly in cases where portions of the
+protocol may fulfill the capabilities of interesting layers (SCTPData implements
+ApplicationLayer, while base SCTP implements TransportLayer), or possibly
+because splitting a protocol into a few layers makes decoding easier.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket.
+
+Port Types
+
+Instead of using raw uint16 or uint8 values for ports, we use a different port
+type for every protocol, for example TCPPort and UDPPort. This allows us to
+override string behavior for each port, which we do by setting up port name
+maps (TCPPortNames, UDPPortNames, etc...). Well-known ports are annotated with
+their protocol names, and their String function displays these names:
+
+ p := TCPPort(80)
+ fmt.Printf("Number: %d String: %v", p, p)
+ // Prints: "Number: 80 String: 80(http)"
+
+Modifying Decode Behavior
+
+layers links together decoding through its enumerations. For example, after
+decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder.
+All enumerations that act as decoders, like EthernetType, can be modified by
+users depending on their preferences. For example, if you have a spiffy new
+IPv4 decoder that works way better than the one built into layers, you can do
+this:
+
+ var mySpiffyIPv4Decoder gopacket.Decoder = ...
+ layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder
+
+This will make all future ethernet packets use your new decoder to decode IPv4
+packets, instead of the built-in decoder used by gopacket.
+*/
+package layers
diff --git a/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/google/gopacket/layers/dot11.go
new file mode 100644
index 0000000..3843d70
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot11.go
@@ -0,0 +1,2105 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on
+// all of the layers in this file.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control
+// header, all in one place.
+type Dot11Flags uint8
+
+// The flag bits are declared in wire order via iota, starting at bit 0
+// (ToDS) and ending at bit 7 (Order).
+const (
+ Dot11FlagsToDS Dot11Flags = 1 << iota
+ Dot11FlagsFromDS
+ Dot11FlagsMF
+ Dot11FlagsRetry
+ Dot11FlagsPowerManagement
+ Dot11FlagsMD
+ Dot11FlagsWEP
+ Dot11FlagsOrder
+)
+
+// ToDS reports whether the To-DS flag bit is set.
+func (d Dot11Flags) ToDS() bool {
+ return d&Dot11FlagsToDS != 0
+}
+// FromDS reports whether the From-DS flag bit is set.
+func (d Dot11Flags) FromDS() bool {
+ return d&Dot11FlagsFromDS != 0
+}
+// MF reports whether the MF (More Fragments) flag bit is set.
+func (d Dot11Flags) MF() bool {
+ return d&Dot11FlagsMF != 0
+}
+// Retry reports whether the Retry flag bit is set.
+func (d Dot11Flags) Retry() bool {
+ return d&Dot11FlagsRetry != 0
+}
+// PowerManagement reports whether the Power Management flag bit is set.
+func (d Dot11Flags) PowerManagement() bool {
+ return d&Dot11FlagsPowerManagement != 0
+}
+// MD reports whether the MD (More Data) flag bit is set.
+func (d Dot11Flags) MD() bool {
+ return d&Dot11FlagsMD != 0
+}
+// WEP reports whether the WEP (protected frame) flag bit is set.
+func (d Dot11Flags) WEP() bool {
+ return d&Dot11FlagsWEP != 0
+}
+// Order reports whether the Order flag bit is set.
+func (d Dot11Flags) Order() bool {
+ return d&Dot11FlagsOrder != 0
+}
+
+// String provides a human readable string for Dot11Flags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Flags value, not its string.
+// Set flags are emitted comma-separated in bit order; an empty string
+// means no flags are set.
+func (a Dot11Flags) String() string {
+ var out bytes.Buffer
+ if a.ToDS() {
+ out.WriteString("TO-DS,")
+ }
+ if a.FromDS() {
+ out.WriteString("FROM-DS,")
+ }
+ if a.MF() {
+ out.WriteString("MF,")
+ }
+ if a.Retry() {
+ out.WriteString("Retry,")
+ }
+ if a.PowerManagement() {
+ out.WriteString("PowerManagement,")
+ }
+ if a.MD() {
+ out.WriteString("MD,")
+ }
+ if a.WEP() {
+ out.WriteString("WEP,")
+ }
+ if a.Order() {
+ out.WriteString("Order,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return "" // no flags set
+}
+
+// Dot11Reason is a reason code carried in 802.11 deauthentication and
+// disassociation management frames.
+// NOTE(review): these values look shifted by one relative to the reason
+// codes in IEEE 802.11-2012 Table 8-36 (there, 0 is reserved and 1 is
+// "unspecified") — confirm against upstream gopacket before relying on
+// the numeric values.
+type Dot11Reason uint16
+
+// TODO: Verify these reasons, and append more reasons if necessary.
+
+const (
+ Dot11ReasonReserved Dot11Reason = 1
+ Dot11ReasonUnspecified Dot11Reason = 2
+ Dot11ReasonAuthExpired Dot11Reason = 3
+ Dot11ReasonDeauthStLeaving Dot11Reason = 4
+ Dot11ReasonInactivity Dot11Reason = 5
+ Dot11ReasonApFull Dot11Reason = 6
+ Dot11ReasonClass2FromNonAuth Dot11Reason = 7
+ Dot11ReasonClass3FromNonAss Dot11Reason = 8
+ Dot11ReasonDisasStLeaving Dot11Reason = 9
+ Dot11ReasonStNotAuth Dot11Reason = 10
+)
+
+// String provides a human readable string for Dot11Reason.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Reason value, not its string.
+// Unknown values fall through to "Unknown reason".
+func (a Dot11Reason) String() string {
+ switch a {
+ case Dot11ReasonReserved:
+ return "Reserved"
+ case Dot11ReasonUnspecified:
+ return "Unspecified"
+ case Dot11ReasonAuthExpired:
+ return "Auth. expired"
+ case Dot11ReasonDeauthStLeaving:
+ return "Deauth. st. leaving"
+ case Dot11ReasonInactivity:
+ return "Inactivity"
+ case Dot11ReasonApFull:
+ return "Ap. full"
+ case Dot11ReasonClass2FromNonAuth:
+ return "Class2 from non auth."
+ case Dot11ReasonClass3FromNonAss:
+ return "Class3 from non ass."
+ case Dot11ReasonDisasStLeaving:
+ return "Disass st. leaving"
+ case Dot11ReasonStNotAuth:
+ return "St. not auth."
+ default:
+ return "Unknown reason"
+ }
+}
+
+// Dot11Status is the status code carried in 802.11 management frames
+// (e.g. authentication and association responses); 0 means success.
+type Dot11Status uint16
+
+const (
+ Dot11StatusSuccess Dot11Status = 0
+ Dot11StatusFailure Dot11Status = 1 // Unspecified failure
+ Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field
+ Dot11StatusInabilityExistsAssociation Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists
+ Dot11StatusAssociationDenied Dot11Status = 12 // Association denied due to reason outside the scope of this standard
+ Dot11StatusAlgorithmUnsupported Dot11Status = 13 // Responding station does not support the specified authentication algorithm
+ // NOTE(review): "OufOf" below is a typo for "OutOf", but renaming the
+ // exported identifier would break the public API of this vendored package.
+ Dot11StatusOufOfExpectedSequence Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence
+ Dot11StatusChallengeFailure Dot11Status = 15 // Authentication rejected because of challenge failure
+ Dot11StatusTimeout Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence
+ Dot11StatusAPUnableToHandle Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations
+ Dot11StatusRateUnsupported Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter
+)
+
+// String provides a human readable string for Dot11Status.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Status value, not its string.
+// Unknown values fall through to "unknown status".
+func (a Dot11Status) String() string {
+ switch a {
+ case Dot11StatusSuccess:
+ return "success"
+ case Dot11StatusFailure:
+ return "failure"
+ case Dot11StatusCannotSupportAllCapabilities:
+ return "cannot-support-all-capabilities"
+ case Dot11StatusInabilityExistsAssociation:
+ return "inability-exists-association"
+ case Dot11StatusAssociationDenied:
+ return "association-denied"
+ case Dot11StatusAlgorithmUnsupported:
+ return "algorithm-unsupported"
+ case Dot11StatusOufOfExpectedSequence:
+ return "out-of-expected-sequence"
+ case Dot11StatusChallengeFailure:
+ return "challenge-failure"
+ case Dot11StatusTimeout:
+ return "timeout"
+ case Dot11StatusAPUnableToHandle:
+ return "ap-unable-to-handle"
+ case Dot11StatusRateUnsupported:
+ return "rate-unsupported"
+ default:
+ return "unknown status"
+ }
+}
+
+// Dot11AckPolicy is an 802.11 acknowledgement policy value
+// (presumably the 2-bit Ack Policy subfield of the QoS Control field —
+// confirm against the 802.11 spec referenced in the file header).
+type Dot11AckPolicy uint8
+
+const (
+ Dot11AckPolicyNormal Dot11AckPolicy = 0
+ Dot11AckPolicyNone Dot11AckPolicy = 1
+ Dot11AckPolicyNoExplicit Dot11AckPolicy = 2
+ Dot11AckPolicyBlock Dot11AckPolicy = 3
+)
+
+// String provides a human readable string for Dot11AckPolicy.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11AckPolicy value, not its string.
+// Unknown values fall through to "unknown-ack-policy".
+func (a Dot11AckPolicy) String() string {
+ switch a {
+ case Dot11AckPolicyNormal:
+ return "normal-ack"
+ case Dot11AckPolicyNone:
+ return "no-ack"
+ case Dot11AckPolicyNoExplicit:
+ return "no-explicit-ack"
+ case Dot11AckPolicyBlock:
+ return "block-ack"
+ default:
+ return "unknown-ack-policy"
+ }
+}
+
+// Dot11Algorithm is the authentication algorithm number carried in
+// 802.11 Authentication frames: 0 = open system, 1 = shared key.
+type Dot11Algorithm uint16
+
+const (
+ Dot11AlgorithmOpen Dot11Algorithm = 0
+ Dot11AlgorithmSharedKey Dot11Algorithm = 1
+)
+
+// String provides a human readable string for Dot11Algorithm.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Algorithm value, not its string.
+// Unknown values fall through to "unknown-algorithm".
+func (a Dot11Algorithm) String() string {
+ switch a {
+ case Dot11AlgorithmOpen:
+ return "open"
+ case Dot11AlgorithmSharedKey:
+ return "shared-key"
+ default:
+ return "unknown-algorithm"
+ }
+}
+
+// Dot11InformationElementID identifies the kind of a tagged information
+// element carried in 802.11 management frames. Human-readable names for
+// each value are provided by its Stringer implementation.
+type Dot11InformationElementID uint8
+
+// Known information element IDs. Gaps in the numbering correspond to
+// values this package does not name.
+const (
+ Dot11InformationElementIDSSID Dot11InformationElementID = 0
+ Dot11InformationElementIDRates Dot11InformationElementID = 1
+ Dot11InformationElementIDFHSet Dot11InformationElementID = 2
+ Dot11InformationElementIDDSSet Dot11InformationElementID = 3
+ Dot11InformationElementIDCFSet Dot11InformationElementID = 4
+ Dot11InformationElementIDTIM Dot11InformationElementID = 5
+ Dot11InformationElementIDIBSSSet Dot11InformationElementID = 6
+ Dot11InformationElementIDCountryInfo Dot11InformationElementID = 7
+ Dot11InformationElementIDHoppingPatternParam Dot11InformationElementID = 8
+ Dot11InformationElementIDHoppingPatternTable Dot11InformationElementID = 9
+ Dot11InformationElementIDRequest Dot11InformationElementID = 10
+ Dot11InformationElementIDQBSSLoadElem Dot11InformationElementID = 11
+ Dot11InformationElementIDEDCAParamSet Dot11InformationElementID = 12
+ Dot11InformationElementIDTrafficSpec Dot11InformationElementID = 13
+ Dot11InformationElementIDTrafficClass Dot11InformationElementID = 14
+ Dot11InformationElementIDSchedule Dot11InformationElementID = 15
+ Dot11InformationElementIDChallenge Dot11InformationElementID = 16
+ Dot11InformationElementIDPowerConst Dot11InformationElementID = 32
+ Dot11InformationElementIDPowerCapability Dot11InformationElementID = 33
+ Dot11InformationElementIDTPCRequest Dot11InformationElementID = 34
+ Dot11InformationElementIDTPCReport Dot11InformationElementID = 35
+ Dot11InformationElementIDSupportedChannels Dot11InformationElementID = 36
+ Dot11InformationElementIDSwitchChannelAnnounce Dot11InformationElementID = 37
+ Dot11InformationElementIDMeasureRequest Dot11InformationElementID = 38
+ Dot11InformationElementIDMeasureReport Dot11InformationElementID = 39
+ Dot11InformationElementIDQuiet Dot11InformationElementID = 40
+ Dot11InformationElementIDIBSSDFS Dot11InformationElementID = 41
+ Dot11InformationElementIDERPInfo Dot11InformationElementID = 42
+ Dot11InformationElementIDTSDelay Dot11InformationElementID = 43
+ Dot11InformationElementIDTCLASProcessing Dot11InformationElementID = 44
+ Dot11InformationElementIDHTCapabilities Dot11InformationElementID = 45
+ Dot11InformationElementIDQOSCapability Dot11InformationElementID = 46
+ Dot11InformationElementIDERPInfo2 Dot11InformationElementID = 47
+ Dot11InformationElementIDRSNInfo Dot11InformationElementID = 48
+ Dot11InformationElementIDESRates Dot11InformationElementID = 50
+ Dot11InformationElementIDAPChannelReport Dot11InformationElementID = 51
+ Dot11InformationElementIDNeighborReport Dot11InformationElementID = 52
+ Dot11InformationElementIDRCPI Dot11InformationElementID = 53
+ Dot11InformationElementIDMobilityDomain Dot11InformationElementID = 54
+ Dot11InformationElementIDFastBSSTrans Dot11InformationElementID = 55
+ Dot11InformationElementIDTimeoutInt Dot11InformationElementID = 56
+ Dot11InformationElementIDRICData Dot11InformationElementID = 57
+ Dot11InformationElementIDDSERegisteredLoc Dot11InformationElementID = 58
+ Dot11InformationElementIDSuppOperatingClass Dot11InformationElementID = 59
+ Dot11InformationElementIDExtChanSwitchAnnounce Dot11InformationElementID = 60
+ Dot11InformationElementIDHTInfo Dot11InformationElementID = 61
+ Dot11InformationElementIDSecChanOffset Dot11InformationElementID = 62
+ Dot11InformationElementIDBSSAverageAccessDelay Dot11InformationElementID = 63
+ Dot11InformationElementIDAntenna Dot11InformationElementID = 64
+ Dot11InformationElementIDRSNI Dot11InformationElementID = 65
+ Dot11InformationElementIDMeasurePilotTrans Dot11InformationElementID = 66
+ Dot11InformationElementIDBSSAvailAdmCapacity Dot11InformationElementID = 67
+ Dot11InformationElementIDBSSACAccDelayWAPIParam Dot11InformationElementID = 68
+ Dot11InformationElementIDTimeAdvertisement Dot11InformationElementID = 69
+ Dot11InformationElementIDRMEnabledCapabilities Dot11InformationElementID = 70
+ Dot11InformationElementIDMultipleBSSID Dot11InformationElementID = 71
+ Dot11InformationElementID2040BSSCoExist Dot11InformationElementID = 72
+ Dot11InformationElementID2040BSSIntChanReport Dot11InformationElementID = 73
+ Dot11InformationElementIDOverlapBSSScanParam Dot11InformationElementID = 74
+ Dot11InformationElementIDRICDescriptor Dot11InformationElementID = 75
+ Dot11InformationElementIDManagementMIC Dot11InformationElementID = 76
+ Dot11InformationElementIDEventRequest Dot11InformationElementID = 78
+ Dot11InformationElementIDEventReport Dot11InformationElementID = 79
+ Dot11InformationElementIDDiagnosticRequest Dot11InformationElementID = 80
+ Dot11InformationElementIDDiagnosticReport Dot11InformationElementID = 81
+ Dot11InformationElementIDLocationParam Dot11InformationElementID = 82
+ Dot11InformationElementIDNonTransBSSIDCapability Dot11InformationElementID = 83
+ Dot11InformationElementIDSSIDList Dot11InformationElementID = 84
+ Dot11InformationElementIDMultipleBSSIDIndex Dot11InformationElementID = 85
+ Dot11InformationElementIDFMSDescriptor Dot11InformationElementID = 86
+ Dot11InformationElementIDFMSRequest Dot11InformationElementID = 87
+ Dot11InformationElementIDFMSResponse Dot11InformationElementID = 88
+ Dot11InformationElementIDQOSTrafficCapability Dot11InformationElementID = 89
+ Dot11InformationElementIDBSSMaxIdlePeriod Dot11InformationElementID = 90
+ Dot11InformationElementIDTFSRequest Dot11InformationElementID = 91
+ Dot11InformationElementIDTFSResponse Dot11InformationElementID = 92
+ Dot11InformationElementIDWNMSleepMode Dot11InformationElementID = 93
+ Dot11InformationElementIDTIMBroadcastRequest Dot11InformationElementID = 94
+ Dot11InformationElementIDTIMBroadcastResponse Dot11InformationElementID = 95
+ Dot11InformationElementIDCollInterferenceReport Dot11InformationElementID = 96
+ Dot11InformationElementIDChannelUsage Dot11InformationElementID = 97
+ Dot11InformationElementIDTimeZone Dot11InformationElementID = 98
+ Dot11InformationElementIDDMSRequest Dot11InformationElementID = 99
+ Dot11InformationElementIDDMSResponse Dot11InformationElementID = 100
+ Dot11InformationElementIDLinkIdentifier Dot11InformationElementID = 101
+ Dot11InformationElementIDWakeupSchedule Dot11InformationElementID = 102
+ Dot11InformationElementIDChannelSwitchTiming Dot11InformationElementID = 104
+ Dot11InformationElementIDPTIControl Dot11InformationElementID = 105
+ Dot11InformationElementIDPUBufferStatus Dot11InformationElementID = 106
+ Dot11InformationElementIDInterworking Dot11InformationElementID = 107
+ Dot11InformationElementIDAdvertisementProtocol Dot11InformationElementID = 108
+ Dot11InformationElementIDExpBWRequest Dot11InformationElementID = 109
+ Dot11InformationElementIDQOSMapSet Dot11InformationElementID = 110
+ Dot11InformationElementIDRoamingConsortium Dot11InformationElementID = 111
+ Dot11InformationElementIDEmergencyAlertIdentifier Dot11InformationElementID = 112
+ Dot11InformationElementIDMeshConfiguration Dot11InformationElementID = 113
+ Dot11InformationElementIDMeshID Dot11InformationElementID = 114
+ Dot11InformationElementIDMeshLinkMetricReport Dot11InformationElementID = 115
+ Dot11InformationElementIDCongestionNotification Dot11InformationElementID = 116
+ Dot11InformationElementIDMeshPeeringManagement Dot11InformationElementID = 117
+ Dot11InformationElementIDMeshChannelSwitchParam Dot11InformationElementID = 118
+ Dot11InformationElementIDMeshAwakeWindows Dot11InformationElementID = 119
+ Dot11InformationElementIDBeaconTiming Dot11InformationElementID = 120
+ Dot11InformationElementIDMCCAOPSetupRequest Dot11InformationElementID = 121
+ Dot11InformationElementIDMCCAOPSetupReply Dot11InformationElementID = 122
+ Dot11InformationElementIDMCCAOPAdvertisement Dot11InformationElementID = 123
+ Dot11InformationElementIDMCCAOPTeardown Dot11InformationElementID = 124
+ Dot11InformationElementIDGateAnnouncement Dot11InformationElementID = 125
+ Dot11InformationElementIDRootAnnouncement Dot11InformationElementID = 126
+ Dot11InformationElementIDExtCapability Dot11InformationElementID = 127
+ Dot11InformationElementIDAgereProprietary Dot11InformationElementID = 128
+ Dot11InformationElementIDPathRequest Dot11InformationElementID = 130
+ Dot11InformationElementIDPathReply Dot11InformationElementID = 131
+ Dot11InformationElementIDPathError Dot11InformationElementID = 132
+ Dot11InformationElementIDCiscoCCX1CKIPDeviceName Dot11InformationElementID = 133
+ Dot11InformationElementIDCiscoCCX2 Dot11InformationElementID = 136
+ Dot11InformationElementIDProxyUpdate Dot11InformationElementID = 137
+ Dot11InformationElementIDProxyUpdateConfirmation Dot11InformationElementID = 138
+ Dot11InformationElementIDAuthMeshPerringExch Dot11InformationElementID = 139
+ Dot11InformationElementIDMIC Dot11InformationElementID = 140
+ Dot11InformationElementIDDestinationURI Dot11InformationElementID = 141
+ Dot11InformationElementIDUAPSDCoexistence Dot11InformationElementID = 142
+ Dot11InformationElementIDWakeupSchedule80211ad Dot11InformationElementID = 143
+ Dot11InformationElementIDExtendedSchedule Dot11InformationElementID = 144
+ Dot11InformationElementIDSTAAvailability Dot11InformationElementID = 145
+ Dot11InformationElementIDDMGTSPEC Dot11InformationElementID = 146
+ Dot11InformationElementIDNextDMGATI Dot11InformationElementID = 147
+ Dot11InformationElementIDDMSCapabilities Dot11InformationElementID = 148
+ Dot11InformationElementIDCiscoUnknown95 Dot11InformationElementID = 149
+ Dot11InformationElementIDVendor2 Dot11InformationElementID = 150
+ Dot11InformationElementIDDMGOperating Dot11InformationElementID = 151
+ Dot11InformationElementIDDMGBSSParamChange Dot11InformationElementID = 152
+ Dot11InformationElementIDDMGBeamRefinement Dot11InformationElementID = 153
+ Dot11InformationElementIDChannelMeasFeedback Dot11InformationElementID = 154
+ Dot11InformationElementIDAwakeWindow Dot11InformationElementID = 157
+ Dot11InformationElementIDMultiBand Dot11InformationElementID = 158
+ Dot11InformationElementIDADDBAExtension Dot11InformationElementID = 159
+ Dot11InformationElementIDNEXTPCPList Dot11InformationElementID = 160
+ Dot11InformationElementIDPCPHandover Dot11InformationElementID = 161
+ Dot11InformationElementIDDMGLinkMargin Dot11InformationElementID = 162
+ Dot11InformationElementIDSwitchingStream Dot11InformationElementID = 163
+ Dot11InformationElementIDSessionTransmission Dot11InformationElementID = 164
+ Dot11InformationElementIDDynamicTonePairReport Dot11InformationElementID = 165
+ Dot11InformationElementIDClusterReport Dot11InformationElementID = 166
+ Dot11InformationElementIDRelayCapabilities Dot11InformationElementID = 167
+ Dot11InformationElementIDRelayTransferParameter Dot11InformationElementID = 168
+ Dot11InformationElementIDBeamlinkMaintenance Dot11InformationElementID = 169
+ Dot11InformationElementIDMultipleMacSublayers Dot11InformationElementID = 170
+ Dot11InformationElementIDUPID Dot11InformationElementID = 171
+ Dot11InformationElementIDDMGLinkAdaptionAck Dot11InformationElementID = 172
+ Dot11InformationElementIDSymbolProprietary Dot11InformationElementID = 173
+ Dot11InformationElementIDMCCAOPAdvertOverview Dot11InformationElementID = 174
+ Dot11InformationElementIDQuietPeriodRequest Dot11InformationElementID = 175
+ Dot11InformationElementIDQuietPeriodResponse Dot11InformationElementID = 177
+ Dot11InformationElementIDECPACPolicy Dot11InformationElementID = 182
+ Dot11InformationElementIDClusterTimeOffset Dot11InformationElementID = 183
+ Dot11InformationElementIDAntennaSectorID Dot11InformationElementID = 190
+ Dot11InformationElementIDVHTCapabilities Dot11InformationElementID = 191
+ Dot11InformationElementIDVHTOperation Dot11InformationElementID = 192
+ Dot11InformationElementIDExtendedBSSLoad Dot11InformationElementID = 193
+ Dot11InformationElementIDWideBWChannelSwitch Dot11InformationElementID = 194
+ Dot11InformationElementIDVHTTxPowerEnvelope Dot11InformationElementID = 195
+ Dot11InformationElementIDChannelSwitchWrapper Dot11InformationElementID = 196
+ Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199
+ Dot11InformationElementIDUPSIM Dot11InformationElementID = 200
+ Dot11InformationElementIDReducedNeighborReport Dot11InformationElementID = 201
+ Dot11InformationElementIDTVHTOperation Dot11InformationElementID = 202
+ Dot11InformationElementIDDeviceLocation Dot11InformationElementID = 204
+ Dot11InformationElementIDWhiteSpaceMap Dot11InformationElementID = 205
+ Dot11InformationElementIDFineTuningMeasureParams Dot11InformationElementID = 206
+ Dot11InformationElementIDVendor Dot11InformationElementID = 221
+)
+
+// String provides a human readable string for Dot11InformationElementID.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11InformationElementID value,
+// not its string.
+func (a Dot11InformationElementID) String() string {
+ switch a {
+ case Dot11InformationElementIDSSID:
+ return "SSID parameter set"
+ case Dot11InformationElementIDRates:
+ return "Supported Rates"
+ case Dot11InformationElementIDFHSet:
+ return "FH Parameter set"
+ case Dot11InformationElementIDDSSet:
+ return "DS Parameter set"
+ case Dot11InformationElementIDCFSet:
+ return "CF Parameter set"
+ case Dot11InformationElementIDTIM:
+ return "Traffic Indication Map (TIM)"
+ case Dot11InformationElementIDIBSSSet:
+ return "IBSS Parameter set"
+ case Dot11InformationElementIDCountryInfo:
+ return "Country Information"
+ case Dot11InformationElementIDHoppingPatternParam:
+ return "Hopping Pattern Parameters"
+ case Dot11InformationElementIDHoppingPatternTable:
+ return "Hopping Pattern Table"
+ case Dot11InformationElementIDRequest:
+ return "Request"
+ case Dot11InformationElementIDQBSSLoadElem:
+ return "QBSS Load Element"
+ case Dot11InformationElementIDEDCAParamSet:
+ return "EDCA Parameter Set"
+ case Dot11InformationElementIDTrafficSpec:
+ return "Traffic Specification"
+ case Dot11InformationElementIDTrafficClass:
+ return "Traffic Classification"
+ case Dot11InformationElementIDSchedule:
+ return "Schedule"
+ case Dot11InformationElementIDChallenge:
+ return "Challenge text"
+ case Dot11InformationElementIDPowerConst:
+ return "Power Constraint"
+ case Dot11InformationElementIDPowerCapability:
+ return "Power Capability"
+ case Dot11InformationElementIDTPCRequest:
+ return "TPC Request"
+ case Dot11InformationElementIDTPCReport:
+ return "TPC Report"
+ case Dot11InformationElementIDSupportedChannels:
+ return "Supported Channels"
+ case Dot11InformationElementIDSwitchChannelAnnounce:
+ return "Channel Switch Announcement"
+ case Dot11InformationElementIDMeasureRequest:
+ return "Measurement Request"
+ case Dot11InformationElementIDMeasureReport:
+ return "Measurement Report"
+ case Dot11InformationElementIDQuiet:
+ return "Quiet"
+ case Dot11InformationElementIDIBSSDFS:
+ return "IBSS DFS"
+ case Dot11InformationElementIDERPInfo:
+ return "ERP Information"
+ case Dot11InformationElementIDTSDelay:
+ return "TS Delay"
+ case Dot11InformationElementIDTCLASProcessing:
+ return "TCLAS Processing"
+ case Dot11InformationElementIDHTCapabilities:
+ return "HT Capabilities (802.11n D1.10)"
+ case Dot11InformationElementIDQOSCapability:
+ return "QOS Capability"
+ case Dot11InformationElementIDERPInfo2:
+ return "ERP Information-2"
+ case Dot11InformationElementIDRSNInfo:
+ return "RSN Information"
+ case Dot11InformationElementIDESRates:
+ return "Extended Supported Rates"
+ case Dot11InformationElementIDAPChannelReport:
+ return "AP Channel Report"
+ case Dot11InformationElementIDNeighborReport:
+ return "Neighbor Report"
+ case Dot11InformationElementIDRCPI:
+ return "RCPI"
+ case Dot11InformationElementIDMobilityDomain:
+ return "Mobility Domain"
+ case Dot11InformationElementIDFastBSSTrans:
+ return "Fast BSS Transition"
+ case Dot11InformationElementIDTimeoutInt:
+ return "Timeout Interval"
+ case Dot11InformationElementIDRICData:
+ return "RIC Data"
+ case Dot11InformationElementIDDSERegisteredLoc:
+ return "DSE Registered Location"
+ case Dot11InformationElementIDSuppOperatingClass:
+ return "Supported Operating Classes"
+ case Dot11InformationElementIDExtChanSwitchAnnounce:
+ return "Extended Channel Switch Announcement"
+ case Dot11InformationElementIDHTInfo:
+ return "HT Information (802.11n D1.10)"
+ case Dot11InformationElementIDSecChanOffset:
+ return "Secondary Channel Offset (802.11n D1.10)"
+ case Dot11InformationElementIDBSSAverageAccessDelay:
+ return "BSS Average Access Delay"
+ case Dot11InformationElementIDAntenna:
+ return "Antenna"
+ case Dot11InformationElementIDRSNI:
+ return "RSNI"
+ case Dot11InformationElementIDMeasurePilotTrans:
+ return "Measurement Pilot Transmission"
+ case Dot11InformationElementIDBSSAvailAdmCapacity:
+ return "BSS Available Admission Capacity"
+ case Dot11InformationElementIDBSSACAccDelayWAPIParam:
+ return "BSS AC Access Delay/WAPI Parameter Set"
+ case Dot11InformationElementIDTimeAdvertisement:
+ return "Time Advertisement"
+ case Dot11InformationElementIDRMEnabledCapabilities:
+ return "RM Enabled Capabilities"
+ case Dot11InformationElementIDMultipleBSSID:
+ return "Multiple BSSID"
+ case Dot11InformationElementID2040BSSCoExist:
+ return "20/40 BSS Coexistence"
+ case Dot11InformationElementID2040BSSIntChanReport:
+ return "20/40 BSS Intolerant Channel Report"
+ case Dot11InformationElementIDOverlapBSSScanParam:
+ return "Overlapping BSS Scan Parameters"
+ case Dot11InformationElementIDRICDescriptor:
+ return "RIC Descriptor"
+ case Dot11InformationElementIDManagementMIC:
+ return "Management MIC"
+ case Dot11InformationElementIDEventRequest:
+ return "Event Request"
+ case Dot11InformationElementIDEventReport:
+ return "Event Report"
+ case Dot11InformationElementIDDiagnosticRequest:
+ return "Diagnostic Request"
+ case Dot11InformationElementIDDiagnosticReport:
+ return "Diagnostic Report"
+ case Dot11InformationElementIDLocationParam:
+ return "Location Parameters"
+ case Dot11InformationElementIDNonTransBSSIDCapability:
+ return "Non Transmitted BSSID Capability"
+ case Dot11InformationElementIDSSIDList:
+ return "SSID List"
+ case Dot11InformationElementIDMultipleBSSIDIndex:
+ return "Multiple BSSID Index"
+ case Dot11InformationElementIDFMSDescriptor:
+ return "FMS Descriptor"
+ case Dot11InformationElementIDFMSRequest:
+ return "FMS Request"
+ case Dot11InformationElementIDFMSResponse:
+ return "FMS Response"
+ case Dot11InformationElementIDQOSTrafficCapability:
+ return "QoS Traffic Capability"
+ case Dot11InformationElementIDBSSMaxIdlePeriod:
+ return "BSS Max Idle Period"
+ case Dot11InformationElementIDTFSRequest:
+ return "TFS Request"
+ case Dot11InformationElementIDTFSResponse:
+ return "TFS Response"
+ case Dot11InformationElementIDWNMSleepMode:
+ return "WNM-Sleep Mode"
+ case Dot11InformationElementIDTIMBroadcastRequest:
+ return "TIM Broadcast Request"
+ case Dot11InformationElementIDTIMBroadcastResponse:
+ return "TIM Broadcast Response"
+ case Dot11InformationElementIDCollInterferenceReport:
+ return "Collocated Interference Report"
+ case Dot11InformationElementIDChannelUsage:
+ return "Channel Usage"
+ case Dot11InformationElementIDTimeZone:
+ return "Time Zone"
+ case Dot11InformationElementIDDMSRequest:
+ return "DMS Request"
+ case Dot11InformationElementIDDMSResponse:
+ return "DMS Response"
+ case Dot11InformationElementIDLinkIdentifier:
+ return "Link Identifier"
+ case Dot11InformationElementIDWakeupSchedule:
+ return "Wakeup Schedule"
+ case Dot11InformationElementIDChannelSwitchTiming:
+ return "Channel Switch Timing"
+ case Dot11InformationElementIDPTIControl:
+ return "PTI Control"
+ case Dot11InformationElementIDPUBufferStatus:
+ return "PU Buffer Status"
+ case Dot11InformationElementIDInterworking:
+ return "Interworking"
+ case Dot11InformationElementIDAdvertisementProtocol:
+ return "Advertisement Protocol"
+ case Dot11InformationElementIDExpBWRequest:
+ return "Expedited Bandwidth Request"
+ case Dot11InformationElementIDQOSMapSet:
+ return "QoS Map Set"
+ case Dot11InformationElementIDRoamingConsortium:
+ return "Roaming Consortium"
+ case Dot11InformationElementIDEmergencyAlertIdentifier:
+ return "Emergency Alert Identifier"
+ case Dot11InformationElementIDMeshConfiguration:
+ return "Mesh Configuration"
+ case Dot11InformationElementIDMeshID:
+ return "Mesh ID"
+ case Dot11InformationElementIDMeshLinkMetricReport:
+ return "Mesh Link Metric Report"
+ case Dot11InformationElementIDCongestionNotification:
+ return "Congestion Notification"
+ case Dot11InformationElementIDMeshPeeringManagement:
+ return "Mesh Peering Management"
+ case Dot11InformationElementIDMeshChannelSwitchParam:
+ return "Mesh Channel Switch Parameters"
+ case Dot11InformationElementIDMeshAwakeWindows:
+ return "Mesh Awake Windows"
+ case Dot11InformationElementIDBeaconTiming:
+ return "Beacon Timing"
+ case Dot11InformationElementIDMCCAOPSetupRequest:
+ return "MCCAOP Setup Request"
+ case Dot11InformationElementIDMCCAOPSetupReply:
+ return "MCCAOP SETUP Reply"
+ case Dot11InformationElementIDMCCAOPAdvertisement:
+ return "MCCAOP Advertisement"
+ case Dot11InformationElementIDMCCAOPTeardown:
+ return "MCCAOP Teardown"
+ case Dot11InformationElementIDGateAnnouncement:
+ return "Gate Announcement"
+ case Dot11InformationElementIDRootAnnouncement:
+ return "Root Announcement"
+ case Dot11InformationElementIDExtCapability:
+ return "Extended Capabilities"
+ case Dot11InformationElementIDAgereProprietary:
+ return "Agere Proprietary"
+ case Dot11InformationElementIDPathRequest:
+ return "Path Request"
+ case Dot11InformationElementIDPathReply:
+ return "Path Reply"
+ case Dot11InformationElementIDPathError:
+ return "Path Error"
+ case Dot11InformationElementIDCiscoCCX1CKIPDeviceName:
+ return "Cisco CCX1 CKIP + Device Name"
+ case Dot11InformationElementIDCiscoCCX2:
+ return "Cisco CCX2"
+ case Dot11InformationElementIDProxyUpdate:
+ return "Proxy Update"
+ case Dot11InformationElementIDProxyUpdateConfirmation:
+ return "Proxy Update Confirmation"
+ case Dot11InformationElementIDAuthMeshPerringExch:
+ return "Auhenticated Mesh Perring Exchange"
+ case Dot11InformationElementIDMIC:
+ return "MIC (Message Integrity Code)"
+ case Dot11InformationElementIDDestinationURI:
+ return "Destination URI"
+ case Dot11InformationElementIDUAPSDCoexistence:
+ return "U-APSD Coexistence"
+ case Dot11InformationElementIDWakeupSchedule80211ad:
+ return "Wakeup Schedule 802.11ad"
+ case Dot11InformationElementIDExtendedSchedule:
+ return "Extended Schedule"
+ case Dot11InformationElementIDSTAAvailability:
+ return "STA Availability"
+ case Dot11InformationElementIDDMGTSPEC:
+ return "DMG TSPEC"
+ case Dot11InformationElementIDNextDMGATI:
+ return "Next DMG ATI"
+ case Dot11InformationElementIDDMSCapabilities:
+ return "DMG Capabilities"
+ case Dot11InformationElementIDCiscoUnknown95:
+ return "Cisco Unknown 95"
+ case Dot11InformationElementIDVendor2:
+ return "Vendor Specific"
+ case Dot11InformationElementIDDMGOperating:
+ return "DMG Operating"
+ case Dot11InformationElementIDDMGBSSParamChange:
+ return "DMG BSS Parameter Change"
+ case Dot11InformationElementIDDMGBeamRefinement:
+ return "DMG Beam Refinement"
+ case Dot11InformationElementIDChannelMeasFeedback:
+ return "Channel Measurement Feedback"
+ case Dot11InformationElementIDAwakeWindow:
+ return "Awake Window"
+ case Dot11InformationElementIDMultiBand:
+ return "Multi Band"
+ case Dot11InformationElementIDADDBAExtension:
+ return "ADDBA Extension"
+ case Dot11InformationElementIDNEXTPCPList:
+ return "NEXTPCP List"
+ case Dot11InformationElementIDPCPHandover:
+ return "PCP Handover"
+ case Dot11InformationElementIDDMGLinkMargin:
+ return "DMG Link Margin"
+ case Dot11InformationElementIDSwitchingStream:
+ return "Switching Stream"
+ case Dot11InformationElementIDSessionTransmission:
+ return "Session Transmission"
+ case Dot11InformationElementIDDynamicTonePairReport:
+ return "Dynamic Tone Pairing Report"
+ case Dot11InformationElementIDClusterReport:
+ return "Cluster Report"
+ case Dot11InformationElementIDRelayCapabilities:
+ return "Relay Capabilities"
+ case Dot11InformationElementIDRelayTransferParameter:
+ return "Relay Transfer Parameter"
+ case Dot11InformationElementIDBeamlinkMaintenance:
+ return "Beamlink Maintenance"
+ case Dot11InformationElementIDMultipleMacSublayers:
+ return "Multiple MAC Sublayers"
+ case Dot11InformationElementIDUPID:
+ return "U-PID"
+ case Dot11InformationElementIDDMGLinkAdaptionAck:
+ return "DMG Link Adaption Acknowledgment"
+ case Dot11InformationElementIDSymbolProprietary:
+ return "Symbol Proprietary"
+ case Dot11InformationElementIDMCCAOPAdvertOverview:
+ return "MCCAOP Advertisement Overview"
+ case Dot11InformationElementIDQuietPeriodRequest:
+ return "Quiet Period Request"
+ case Dot11InformationElementIDQuietPeriodResponse:
+ return "Quiet Period Response"
+ case Dot11InformationElementIDECPACPolicy:
+ return "ECPAC Policy"
+ case Dot11InformationElementIDClusterTimeOffset:
+ return "Cluster Time Offset"
+ case Dot11InformationElementIDAntennaSectorID:
+ return "Antenna Sector ID"
+ case Dot11InformationElementIDVHTCapabilities:
+ return "VHT Capabilities (IEEE Std 802.11ac/D3.1)"
+ case Dot11InformationElementIDVHTOperation:
+ return "VHT Operation (IEEE Std 802.11ac/D3.1)"
+ case Dot11InformationElementIDExtendedBSSLoad:
+ return "Extended BSS Load"
+ case Dot11InformationElementIDWideBWChannelSwitch:
+ return "Wide Bandwidth Channel Switch"
+ case Dot11InformationElementIDVHTTxPowerEnvelope:
+ return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)"
+ case Dot11InformationElementIDChannelSwitchWrapper:
+ return "Channel Switch Wrapper"
+ case Dot11InformationElementIDOperatingModeNotification:
+ return "Operating Mode Notification"
+ case Dot11InformationElementIDUPSIM:
+ return "UP SIM"
+ case Dot11InformationElementIDReducedNeighborReport:
+ return "Reduced Neighbor Report"
+ case Dot11InformationElementIDTVHTOperation:
+ return "TVHT Op"
+ case Dot11InformationElementIDDeviceLocation:
+ return "Device Location"
+ case Dot11InformationElementIDWhiteSpaceMap:
+ return "White Space Map"
+ case Dot11InformationElementIDFineTuningMeasureParams:
+ return "Fine Tuning Measure Parameters"
+ case Dot11InformationElementIDVendor:
+ return "Vendor"
+ default:
+ return "Unknown information element id"
+ }
+}
+
// Dot11 provides an IEEE 802.11 base packet header.
// See http://standards.ieee.org/findstds/standard/802.11-2012.html
// for excruciating detail.
type Dot11 struct {
	BaseLayer
	// Type carries both the main frame type and subtype, decoded from the
	// Frame Control field.
	Type Dot11Type
	// Proto is the 2-bit protocol version from the Frame Control field.
	Proto uint8
	Flags Dot11Flags
	DurationID uint16
	Address1 net.HardwareAddr
	Address2 net.HardwareAddr
	Address3 net.HardwareAddr
	// Address4 is only set for data frames with both ToDS and FromDS flags.
	Address4 net.HardwareAddr
	SequenceNumber uint16
	FragmentNumber uint16
	// Checksum is the trailing FCS (CRC-32) of the frame.
	Checksum uint32
	// QOS is non-nil only for QoS frame subtypes.
	QOS *Dot11QOS
	// HTControl is non-nil only when an HT Control field is present
	// (Order flag set on QoS-data or management frames).
	HTControl *Dot11HTControl
	// DataLayer holds the decoded data sub-layer for data frames.
	DataLayer gopacket.Layer
}
+
// Dot11QOS is the 2-byte QoS Control field carried by QoS frame subtypes.
type Dot11QOS struct {
	TID uint8 /* Traffic IDentifier */
	EOSP bool /* End of service period */
	AckPolicy Dot11AckPolicy
	TXOP uint8
}
+
// Dot11HTControl is the 4-byte HT Control field. Exactly one of VHT or HT
// is non-nil, depending on the field's variant bit.
type Dot11HTControl struct {
	ACConstraint bool
	RDGMorePPDU bool

	VHT *Dot11HTControlVHT
	HT *Dot11HTControlHT
}

// Dot11HTControlHT is the HT variant of the HT Control field.
type Dot11HTControlHT struct {
	LinkAdapationControl *Dot11LinkAdapationControl
	CalibrationPosition uint8
	CalibrationSequence uint8
	CSISteering uint8
	NDPAnnouncement bool
	// DEI is only decoded for non-management frames.
	DEI bool
}

// Dot11HTControlVHT is the VHT variant of the HT Control field. Pointer
// fields are nil when the corresponding subfield is absent for this frame.
type Dot11HTControlVHT struct {
	MRQ bool
	UnsolicitedMFB bool
	MSI *uint8
	MFB Dot11HTControlMFB
	CompressedMSI *uint8
	STBCIndication bool
	MFSI *uint8
	GID *uint8
	CodingType *Dot11CodingType
	FbTXBeamformed bool
}

// Dot11HTControlMFB is the MCS feedback subfield of the VHT variant.
type Dot11HTControlMFB struct {
	NumSTS uint8
	VHTMCS uint8
	BW uint8
	SNR int8
}

// Dot11LinkAdapationControl is the link adaptation control subfield of the
// HT variant. ASEL and MFB are mutually dependent on the decoded bits:
// ASEL is non-nil for antenna-selection frames, MFB otherwise.
type Dot11LinkAdapationControl struct {
	TRQ bool
	MRQ bool
	MSI uint8
	MFSI uint8
	ASEL *Dot11ASEL
	MFB *uint8
}

// Dot11ASEL holds the antenna-selection command/data pair.
type Dot11ASEL struct {
	Command uint8
	Data uint8
}
+
// Dot11CodingType is the FEC coding type reported in the VHT variant of
// the HT Control field.
type Dot11CodingType uint8

const (
	Dot11CodingTypeBCC = 0 // binary convolutional coding
	Dot11CodingTypeLDPC = 1 // low-density parity-check coding
)
+
+func (a Dot11CodingType) String() string {
+ switch a {
+ case Dot11CodingTypeBCC:
+ return "BCC"
+ case Dot11CodingTypeLDPC:
+ return "LDPC"
+ default:
+ return "Unknown coding type"
+ }
+}
+
+func (m *Dot11HTControlMFB) NoFeedBackPresent() bool {
+ return m.VHTMCS == 15 && m.NumSTS == 7
+}
+
+func decodeDot11(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ if d.DataLayer != nil {
+ p.AddLayer(d.DataLayer)
+ }
+ return p.NextDecoder(d.NextLayerType())
+}
+
// LayerType returns LayerTypeDot11.
func (m *Dot11) LayerType() gopacket.LayerType { return LayerTypeDot11 }

// CanDecode returns LayerTypeDot11.
func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 }

// NextLayerType selects the decoder following this header: WEP for
// encrypted data frames, the data sub-layer's successor for plaintext data
// frames, and otherwise the layer implied by the frame type.
func (m *Dot11) NextLayerType() gopacket.LayerType {
	if m.DataLayer != nil {
		if m.Flags.WEP() {
			return LayerTypeDot11WEP
		}
		return m.DataLayer.(gopacket.DecodingLayer).NextLayerType()
	}
	return m.Type.LayerType()
}
+
// createU8 returns a pointer to a fresh copy of x; it exists to populate
// the optional *uint8 fields of the HT-control structs from expressions.
func createU8(x uint8) *uint8 {
	v := x
	return &v
}
+
// dataDecodeMap maps each supported 802.11 data subtype to a constructor
// for its decoding layer. Reserved data subtypes are intentionally absent.
var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{
	Dot11TypeData: func() gopacket.DecodingLayer { return &Dot11Data{} },
	Dot11TypeDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataCFAck{} },
	Dot11TypeDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} },
	Dot11TypeDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} },
	Dot11TypeDataNull: func() gopacket.DecodingLayer { return &Dot11DataNull{} },
	Dot11TypeDataCFAckNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} },
	Dot11TypeDataCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} },
	Dot11TypeDataCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} },
	Dot11TypeDataQOSData: func() gopacket.DecodingLayer { return &Dot11DataQOSData{} },
	Dot11TypeDataQOSDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} },
	Dot11TypeDataQOSDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} },
	Dot11TypeDataQOSDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAckPoll{} },
	Dot11TypeDataQOSNull: func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} },
	Dot11TypeDataQOSCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} },
	Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} },
}
+
+func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 10 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10)
+ }
+ m.Type = Dot11Type((data[0])&0xFC) >> 2
+
+ m.Proto = uint8(data[0]) & 0x0003
+ m.Flags = Dot11Flags(data[1])
+ m.DurationID = binary.LittleEndian.Uint16(data[2:4])
+ m.Address1 = net.HardwareAddr(data[4:10])
+
+ offset := 10
+
+ mainType := m.Type.MainType()
+
+ switch mainType {
+ case Dot11TypeCtrl:
+ switch m.Type {
+ case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+ if len(data) < offset+6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.Address2 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ }
+ case Dot11TypeMgmt, Dot11TypeData:
+ if len(data) < offset+14 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14)
+ }
+ m.Address2 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ m.Address3 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+
+ m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4
+ m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F)
+ offset += 2
+ }
+
+ if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+ if len(data) < offset+6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.Address4 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ }
+
+ if m.Type.QOS() {
+ if len(data) < offset+2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.QOS = &Dot11QOS{
+ TID: (uint8(data[offset]) & 0x0F),
+ EOSP: (uint8(data[offset]) & 0x10) == 0x10,
+ AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5),
+ TXOP: uint8(data[offset+1]),
+ }
+ offset += 2
+ }
+ if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) {
+ if len(data) < offset+4 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+
+ htc := &Dot11HTControl{
+ ACConstraint: data[offset+3]&0x40 != 0,
+ RDGMorePPDU: data[offset+3]&0x80 != 0,
+ }
+ m.HTControl = htc
+
+ if data[offset]&0x1 != 0 { // VHT Variant
+ vht := &Dot11HTControlVHT{}
+ htc.VHT = vht
+ vht.MRQ = data[offset]&0x4 != 0
+ vht.UnsolicitedMFB = data[offset+3]&0x20 != 0
+ vht.MFB = Dot11HTControlMFB{
+ NumSTS: uint8(data[offset+1] >> 1 & 0x7),
+ VHTMCS: uint8(data[offset+1] >> 4 & 0xF),
+ BW: uint8(data[offset+2] & 0x3),
+ SNR: int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22,
+ }
+
+ if vht.UnsolicitedMFB {
+ if !vht.MFB.NoFeedBackPresent() {
+ vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3)
+ vht.STBCIndication = data[offset]&0x20 != 0
+ vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1))
+ vht.FbTXBeamformed = data[offset+3]&0x10 != 0
+ vht.GID = createU8(
+ data[offset]>>6 +
+ (data[offset+1] & 0x1 << 2) +
+ data[offset+3]&0x7<<3)
+ }
+ } else {
+ if vht.MRQ {
+ vht.MSI = createU8((data[offset] >> 3) & 0x07)
+ }
+ vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2))
+ }
+
+ } else { // HT Variant
+ ht := &Dot11HTControlHT{}
+ htc.HT = ht
+
+ lac := &Dot11LinkAdapationControl{}
+ ht.LinkAdapationControl = lac
+ lac.TRQ = data[offset]&0x2 != 0
+ lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3
+ if data[offset]&0x3C == 0x38 { // ASEL
+ lac.ASEL = &Dot11ASEL{
+ Command: data[offset+1] >> 1 & 0x7,
+ Data: data[offset+1] >> 4 & 0xF,
+ }
+ } else {
+ lac.MRQ = data[offset]&0x4 != 0
+ if lac.MRQ {
+ lac.MSI = data[offset] >> 3 & 0x7
+ }
+ lac.MFB = createU8(data[offset+1] >> 1)
+ }
+ ht.CalibrationPosition = data[offset+2] & 0x3
+ ht.CalibrationSequence = data[offset+2] >> 2 & 0x3
+ ht.CSISteering = data[offset+2] >> 6 & 0x3
+ ht.NDPAnnouncement = data[offset+3]&0x1 != 0
+ if mainType != Dot11TypeMgmt {
+ ht.DEI = data[offset+3]&0x20 != 0
+ }
+ }
+
+ offset += 4
+ }
+
+ if len(data) < offset+4 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4)
+ }
+
+ m.BaseLayer = BaseLayer{
+ Contents: data[0:offset],
+ Payload: data[offset : len(data)-4],
+ }
+
+ if mainType == Dot11TypeData {
+ l := dataDecodeMap[m.Type]()
+ err := l.DecodeFromBytes(m.BaseLayer.Payload, df)
+ if err != nil {
+ return err
+ }
+ m.DataLayer = l.(gopacket.Layer)
+ }
+
+ m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)])
+ return nil
+}
+
+func (m *Dot11) ChecksumValid() bool {
+ // only for CTRL and MGMT frames
+ h := crc32.NewIEEE()
+ h.Write(m.Contents)
+ h.Write(m.Payload)
+ return m.Checksum == h.Sum32()
+}
+
// SerializeTo writes the 802.11 MAC header into b.
// NOTE(review): this always prepends a fixed 24 bytes even though shorter
// headers (e.g. control frames) use fewer, leaving zeroed trailing bytes,
// and it does not serialize the QOS or HTControl fields — confirm against
// DecodeFromBytes before relying on round-tripping.
func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	buf, err := b.PrependBytes(24)

	if err != nil {
		return err
	}

	// Frame Control: type/subtype in the high 6 bits, protocol version low.
	buf[0] = (uint8(m.Type) << 2) | m.Proto
	buf[1] = uint8(m.Flags)

	binary.LittleEndian.PutUint16(buf[2:4], m.DurationID)

	copy(buf[4:10], m.Address1)

	offset := 10

	switch m.Type.MainType() {
	case Dot11TypeCtrl:
		switch m.Type {
		case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
			copy(buf[offset:offset+6], m.Address2)
			offset += 6
		}
	case Dot11TypeMgmt, Dot11TypeData:
		copy(buf[offset:offset+6], m.Address2)
		offset += 6
		copy(buf[offset:offset+6], m.Address3)
		offset += 6

		// Sequence Control: sequence number in the high 12 bits.
		binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber)
		offset += 2
	}

	if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
		copy(buf[offset:offset+6], m.Address4)
		offset += 6
	}

	return nil
}
+
// Dot11Mgmt is a base for all IEEE 802.11 management layers.
type Dot11Mgmt struct {
	BaseLayer
}

// NextLayerType returns the generic payload layer type.
func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }

// DecodeFromBytes stores the raw bytes as this layer's contents; concrete
// management subtypes decode their own fields before calling this.
func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	m.Contents = data
	return nil
}
+
// Dot11Ctrl is a base for all IEEE 802.11 control layers.
type Dot11Ctrl struct {
	BaseLayer
}

// NextLayerType returns the generic payload layer type.
func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }

// LayerType returns LayerTypeDot11Ctrl.
func (m *Dot11Ctrl) LayerType() gopacket.LayerType { return LayerTypeDot11Ctrl }

// CanDecode returns LayerTypeDot11Ctrl.
func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl }

// DecodeFromBytes stores the raw bytes as this layer's contents.
func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	m.Contents = data
	return nil
}

// decodeDot11Ctrl is the gopacket decoder hook for generic control frames.
func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11Ctrl{}
	return decodingLayerDecoder(d, data, p)
}
+
// Dot11WEP contains WEP-encrypted IEEE 802.11 data; the ciphertext is kept
// opaque in Contents.
type Dot11WEP struct {
	BaseLayer
}

// NextLayerType returns the generic payload layer type.
func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }

// LayerType returns LayerTypeDot11WEP.
func (m *Dot11WEP) LayerType() gopacket.LayerType { return LayerTypeDot11WEP }

// CanDecode returns LayerTypeDot11WEP.
func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP }

// DecodeFromBytes stores the raw (still encrypted) bytes as the contents.
func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	m.Contents = data
	return nil
}

// decodeDot11WEP is the gopacket decoder hook for WEP-encrypted frames.
func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11WEP{}
	return decodingLayerDecoder(d, data, p)
}
+
// Dot11Data is a base for all IEEE 802.11 data layers.
type Dot11Data struct {
	BaseLayer
}

// NextLayerType returns LayerTypeLLC: data frames carry an LLC header.
func (m *Dot11Data) NextLayerType() gopacket.LayerType {
	return LayerTypeLLC
}

// LayerType returns LayerTypeDot11Data.
func (m *Dot11Data) LayerType() gopacket.LayerType { return LayerTypeDot11Data }

// CanDecode returns LayerTypeDot11Data.
func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data }

// DecodeFromBytes stores the raw bytes as this layer's payload (handed on
// to the LLC decoder).
func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	m.Payload = data
	return nil
}

// decodeDot11Data is the gopacket decoder hook for plain data frames.
func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11Data{}
	return decodingLayerDecoder(d, data, p)
}
+
// Dot11DataCFAck is a Data + CF-Ack frame; decoding delegates to Dot11Data.
type Dot11DataCFAck struct {
	Dot11Data
}

func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFAck{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }
func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck }
func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataCFPoll is a Data + CF-Poll frame; decoding delegates to Dot11Data.
type Dot11DataCFPoll struct {
	Dot11Data
}

func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFPoll{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }
func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll }
func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataCFAckPoll is a Data + CF-Ack + CF-Poll frame.
type Dot11DataCFAckPoll struct {
	Dot11Data
}

func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFAckPoll{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckPoll }
func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll }
func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataNull is a Null (no data) frame.
type Dot11DataNull struct {
	Dot11Data
}

func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataNull{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataNull }
func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull }
func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataCFAckNoData is a CF-Ack frame carrying no data.
type Dot11DataCFAckNoData struct {
	Dot11Data
}

func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFAckNoData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckNoData }
func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData }
func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataCFPollNoData is a CF-Poll frame carrying no data.
type Dot11DataCFPollNoData struct {
	Dot11Data
}

func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFPollNoData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPollNoData }
func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPollNoData }
func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}

// Dot11DataCFAckPollNoData is a CF-Ack + CF-Poll frame carrying no data.
type Dot11DataCFAckPollNoData struct {
	Dot11Data
}

func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataCFAckPollNoData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType {
	return LayerTypeDot11DataCFAckPollNoData
}
func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11DataCFAckPollNoData
}
func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Data.DecodeFromBytes(data, df)
}
+
// Dot11DataQOS is a base for the QoS data frame variants.
// NOTE(review): it embeds Dot11Ctrl (not Dot11Data) — presumably to pick
// up Dot11Ctrl's LayerType/CanDecode while each QoS subtype supplies its
// own NextLayerType; confirm this embedding is intentional.
type Dot11DataQOS struct {
	Dot11Ctrl
}

// DecodeFromBytes stores the raw bytes as payload for the next data layer.
func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	m.BaseLayer = BaseLayer{Payload: data}
	return nil
}
+
// Dot11DataQOSData is a QoS Data frame; its payload continues as Dot11Data.
type Dot11DataQOSData struct {
	Dot11DataQOS
}

func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSData) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSData }
func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData }

func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType {
	return LayerTypeDot11Data
}

// Dot11DataQOSDataCFAck is a QoS Data + CF-Ack frame.
type Dot11DataQOSDataCFAck struct {
	Dot11DataQOS
}

func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSDataCFAck{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSDataCFAck }
func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSDataCFAck }
func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }

// Dot11DataQOSDataCFPoll is a QoS Data + CF-Poll frame.
type Dot11DataQOSDataCFPoll struct {
	Dot11DataQOS
}

func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSDataCFPoll{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType {
	return LayerTypeDot11DataQOSDataCFPoll
}
func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11DataQOSDataCFPoll
}
func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }

// Dot11DataQOSDataCFAckPoll is a QoS Data + CF-Ack + CF-Poll frame.
type Dot11DataQOSDataCFAckPoll struct {
	Dot11DataQOS
}

func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSDataCFAckPoll{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType {
	return LayerTypeDot11DataQOSDataCFAckPoll
}
func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11DataQOSDataCFAckPoll
}
func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType {
	return LayerTypeDot11DataCFAckPoll
}

// Dot11DataQOSNull is a QoS Null (no data) frame.
type Dot11DataQOSNull struct {
	Dot11DataQOS
}

func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSNull{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSNull }
func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSNull }
func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull }

// Dot11DataQOSCFPollNoData is a QoS CF-Poll frame carrying no data.
type Dot11DataQOSCFPollNoData struct {
	Dot11DataQOS
}

func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSCFPollNoData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType {
	return LayerTypeDot11DataQOSCFPollNoData
}
func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11DataQOSCFPollNoData
}
func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType {
	return LayerTypeDot11DataCFPollNoData
}

// Dot11DataQOSCFAckPollNoData is a QoS CF-Ack + CF-Poll frame carrying no data.
type Dot11DataQOSCFAckPollNoData struct {
	Dot11DataQOS
}

func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11DataQOSCFAckPollNoData{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType {
	return LayerTypeDot11DataQOSCFAckPollNoData
}
func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11DataQOSCFAckPollNoData
}
func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType {
	return LayerTypeDot11DataCFAckPollNoData
}
+
// Dot11InformationElement is a single TLV information element from an
// 802.11 management frame body.
type Dot11InformationElement struct {
	BaseLayer
	// ID identifies the element type (e.g. 0 = SSID, 221 = vendor-specific).
	ID Dot11InformationElementID
	// Length is the declared length of the element value in bytes.
	Length uint8
	// OUI holds the first 4 value bytes for vendor-specific elements only.
	OUI []byte
	// Info holds the element value (after the OUI for vendor elements).
	Info []byte
}

// LayerType returns LayerTypeDot11InformationElement.
func (m *Dot11InformationElement) LayerType() gopacket.LayerType {
	return LayerTypeDot11InformationElement
}

// CanDecode returns LayerTypeDot11InformationElement.
func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11InformationElement
}

// NextLayerType returns the same layer type: elements are decoded in a
// chain, one after another, until the payload is exhausted.
func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType {
	return LayerTypeDot11InformationElement
}
+
+func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2)
+ }
+ m.ID = Dot11InformationElementID(data[0])
+ m.Length = data[1]
+ offset := int(2)
+
+ if len(data) < offset+int(m.Length) {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length))
+ }
+ if m.ID == 221 {
+ // Vendor extension
+ m.OUI = data[offset : offset+4]
+ m.Info = data[offset+4 : offset+int(m.Length)]
+ } else {
+ m.Info = data[offset : offset+int(m.Length)]
+ }
+
+ offset += int(m.Length)
+
+ m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+ return nil
+}
+
+func (d *Dot11InformationElement) String() string {
+ if d.ID == 0 {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info))
+ } else if d.ID == 1 {
+ rates := ""
+ for i := 0; i < len(d.Info); i++ {
+ if d.Info[i]&0x80 == 0 {
+ rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5)
+ } else {
+ rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5)
+ }
+ }
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates)
+ } else if d.ID == 221 {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info)
+ } else {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info)
+ }
+}
+
+func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := len(m.Info) + len(m.OUI)
+ if buf, err := b.PrependBytes(2 + length); err != nil {
+ return err
+ } else {
+ buf[0] = uint8(m.ID)
+ buf[1] = uint8(length)
+ copy(buf[2:], m.OUI)
+ copy(buf[2+len(m.OUI):], m.Info)
+ }
+ return nil
+}
+
// decodeDot11InformationElement is the gopacket decoder hook for a chain
// of information elements.
func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11InformationElement{}
	return decodingLayerDecoder(d, data, p)
}
+
// Dot11CtrlCTS is a Clear-To-Send control frame.
type Dot11CtrlCTS struct {
	Dot11Ctrl
}

func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlCTS{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlCTS
}
func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlCTS
}
func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlRTS is a Request-To-Send control frame.
type Dot11CtrlRTS struct {
	Dot11Ctrl
}

func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlRTS{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlRTS
}
func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlRTS
}
func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlBlockAckReq is a Block Ack Request control frame.
type Dot11CtrlBlockAckReq struct {
	Dot11Ctrl
}

func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlBlockAckReq{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlBlockAckReq
}
func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlBlockAckReq
}
func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlBlockAck is a Block Ack control frame.
type Dot11CtrlBlockAck struct {
	Dot11Ctrl
}

func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlBlockAck{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlBlockAck }
func (m *Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck }
func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlPowersavePoll is a PS-Poll control frame.
type Dot11CtrlPowersavePoll struct {
	Dot11Ctrl
}

func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlPowersavePoll{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlPowersavePoll
}
func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlPowersavePoll
}
func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlAck is an Acknowledgement control frame.
type Dot11CtrlAck struct {
	Dot11Ctrl
}

func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlAck{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlAck }
func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck }
func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlCFEnd is a CF-End control frame.
type Dot11CtrlCFEnd struct {
	Dot11Ctrl
}

func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlCFEnd{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlCFEnd
}
func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlCFEnd
}
func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}

// Dot11CtrlCFEndAck is a CF-End + CF-Ack control frame.
type Dot11CtrlCFEndAck struct {
	Dot11Ctrl
}

func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error {
	d := &Dot11CtrlCFEndAck{}
	return decodingLayerDecoder(d, data, p)
}

func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType {
	return LayerTypeDot11CtrlCFEndAck
}
func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass {
	return LayerTypeDot11CtrlCFEndAck
}
func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	return m.Dot11Ctrl.DecodeFromBytes(data, df)
}
+
+type Dot11MgmtAssociationReq struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ ListenInterval uint16
+}
+
+func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAssociationReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
// DecodeFromBytes decodes the fixed 4-byte association-request header:
// capability info (2 bytes) and listen interval (2 bytes), both
// little-endian. The remaining bytes become the payload, which is decoded
// as Dot11 information elements (see NextLayerType).
func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 4 {
		df.SetTruncated()
		return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4)
	}
	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
	m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
	m.Payload = data[4:]
	// Delegate to the embedded Dot11Mgmt for the common management-frame
	// handling; it receives the full buffer, not just the tail.
	return m.Dot11Mgmt.DecodeFromBytes(data, df)
}
+
+func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(4)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+ return nil
+}
+
+type Dot11MgmtAssociationResp struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ Status Dot11Status
+ AID uint16
+}
+
+func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAssociationResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6)
+ }
+ m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+ m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4]))
+ m.AID = binary.LittleEndian.Uint16(data[4:6])
+ m.Payload = data[6:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(6)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status))
+ binary.LittleEndian.PutUint16(buf[4:6], m.AID)
+
+ return nil
+}
+
+type Dot11MgmtReassociationReq struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ ListenInterval uint16
+ CurrentApAddress net.HardwareAddr
+}
+
+func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtReassociationReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 10 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10)
+ }
+ m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+ m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+ m.CurrentApAddress = net.HardwareAddr(data[4:10])
+ m.Payload = data[10:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(10)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+ copy(buf[4:10], m.CurrentApAddress)
+
+ return nil
+}
+
+type Dot11MgmtReassociationResp struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtReassociationResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationResp) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeReq struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtProbeReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeResp struct {
+ Dot11Mgmt
+ Timestamp uint64
+ Interval uint16
+ Flags uint16
+}
+
+func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtProbeResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp }
// DecodeFromBytes decodes the fixed 12-byte probe-response header:
// timestamp (8 bytes), beacon interval (2 bytes) and capability flags
// (2 bytes), all little-endian. The rest of the buffer is left as payload
// for the information-element layer.
func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 12 {
		df.SetTruncated()

		return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12)
	}

	m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
	m.Interval = binary.LittleEndian.Uint16(data[8:10])
	m.Flags = binary.LittleEndian.Uint16(data[10:12])
	m.Payload = data[12:]

	// Common management-frame handling is delegated to the embedded Dot11Mgmt.
	return m.Dot11Mgmt.DecodeFromBytes(data, df)
}
+
+func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(12)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+ binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+ binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+ return nil
+}
+
+type Dot11MgmtMeasurementPilot struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtMeasurementPilot{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtMeasurementPilot
+}
+func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtMeasurementPilot
+}
+
+type Dot11MgmtBeacon struct {
+ Dot11Mgmt
+ Timestamp uint64
+ Interval uint16
+ Flags uint16
+}
+
+func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtBeacon{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12)
+ }
+ m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+ m.Interval = binary.LittleEndian.Uint16(data[8:10])
+ m.Flags = binary.LittleEndian.Uint16(data[10:12])
+ m.Payload = data[12:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement }
+
+func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(12)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+ binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+ binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+ return nil
+}
+
+type Dot11MgmtATIM struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtATIM{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtATIM }
+func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM }
+
+type Dot11MgmtDisassociation struct {
+ Dot11Mgmt
+ Reason Dot11Reason
+}
+
+func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtDisassociation{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2)
+ }
+ m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(2)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+ return nil
+}
+
+type Dot11MgmtAuthentication struct {
+ Dot11Mgmt
+ Algorithm Dot11Algorithm
+ Sequence uint16
+ Status Dot11Status
+}
+
+func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAuthentication{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
// DecodeFromBytes decodes the fixed 6-byte authentication header:
// algorithm, transaction sequence number and status code, all
// little-endian. Any trailing bytes (e.g. the WEP challenge text) are left
// as payload for the information-element layer.
func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 6 {
		df.SetTruncated()
		return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6)
	}
	m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2]))
	m.Sequence = binary.LittleEndian.Uint16(data[2:4])
	m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6]))
	m.Payload = data[6:]
	return m.Dot11Mgmt.DecodeFromBytes(data, df)
}
+
+func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(6)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm))
+ binary.LittleEndian.PutUint16(buf[2:4], m.Sequence)
+ binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status))
+
+ return nil
+}
+
+type Dot11MgmtDeauthentication struct {
+ Dot11Mgmt
+ Reason Dot11Reason
+}
+
+func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtDeauthentication{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2)
+ }
+ m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(2)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+ return nil
+}
+
+type Dot11MgmtAction struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAction{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAction) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtAction }
+func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction }
+
+type Dot11MgmtActionNoAck struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtActionNoAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtActionNoAck }
+func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck }
+
+type Dot11MgmtArubaWLAN struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtArubaWLAN{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtArubaWLAN }
+func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN }
diff --git a/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/google/gopacket/layers/dot1q.go
new file mode 100644
index 0000000..47f93d7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot1q.go
@@ -0,0 +1,71 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// Dot1Q is the packet layer for 802.1Q VLAN headers.
+type Dot1Q struct {
+ BaseLayer
+ Priority uint8
+ DropEligible bool
+ VLANIdentifier uint16
+ Type EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeDot1Q
+func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ d.Priority = (data[0] & 0xE0) >> 5
+ d.DropEligible = data[0]&0x10 != 0
+ d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF
+ d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+ d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *Dot1Q) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot1Q
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *Dot1Q) NextLayerType() gopacket.LayerType {
+ return d.Type.LayerType()
+}
+
+func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot1Q{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ if d.VLANIdentifier > 0xFFF {
+ return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier)
+ }
+ firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier
+ if d.DropEligible {
+ firstBytes |= 0x1000
+ }
+ binary.BigEndian.PutUint16(bytes, firstBytes)
+ binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type))
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/google/gopacket/layers/eap.go
new file mode 100644
index 0000000..250f857
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eap.go
@@ -0,0 +1,106 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+type EAPCode uint8
+type EAPType uint8
+
+const (
+ EAPCodeRequest EAPCode = 1
+ EAPCodeResponse EAPCode = 2
+ EAPCodeSuccess EAPCode = 3
+ EAPCodeFailure EAPCode = 4
+
+ // EAPTypeNone means that this EAP layer has no Type or TypeData.
+ // Success and Failure EAPs will have this set.
+ EAPTypeNone EAPType = 0
+
+ EAPTypeIdentity EAPType = 1
+ EAPTypeNotification EAPType = 2
+ EAPTypeNACK EAPType = 3
+ EAPTypeOTP EAPType = 4
+ EAPTypeTokenCard EAPType = 5
+)
+
+// EAP defines an Extensible Authentication Protocol (rfc 3748) layer.
+type EAP struct {
+ BaseLayer
+ Code EAPCode
+ Id uint8
+ Length uint16
+ Type EAPType
+ TypeData []byte
+}
+
+// LayerType returns LayerTypeEAP.
+func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ e.Code = EAPCode(data[0])
+ e.Id = data[1]
+ e.Length = binary.BigEndian.Uint16(data[2:4])
+ switch {
+ case e.Length > 4:
+ e.Type = EAPType(data[4])
+ e.TypeData = data[5:]
+ case e.Length == 4:
+ e.Type = 0
+ e.TypeData = nil
+ default:
+ return fmt.Errorf("invalid EAP length %d", e.Length)
+ }
+ e.BaseLayer.Contents = data[:e.Length]
+ e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if opts.FixLengths {
+ e.Length = uint16(len(e.TypeData) + 1)
+ }
+ size := len(e.TypeData) + 4
+ if size > 4 {
+ size++
+ }
+ bytes, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ bytes[0] = byte(e.Code)
+ bytes[1] = e.Id
+ binary.BigEndian.PutUint16(bytes[2:], e.Length)
+ if size > 4 {
+ bytes[4] = byte(e.Type)
+ copy(bytes[5:], e.TypeData)
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAP) CanDecode() gopacket.LayerClass {
+ return LayerTypeEAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+func decodeEAP(data []byte, p gopacket.PacketBuilder) error {
+ e := &EAP{}
+ return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go
new file mode 100644
index 0000000..12aa5ba
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eapol.go
@@ -0,0 +1,298 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// EAPOL defines an EAP over LAN (802.1x) layer.
+type EAPOL struct {
+ BaseLayer
+ Version uint8
+ Type EAPOLType
+ Length uint16
+}
+
+// LayerType returns LayerTypeEAPOL.
+func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ e.Version = data[0]
+ e.Type = EAPOLType(data[1])
+ e.Length = binary.BigEndian.Uint16(data[2:4])
+ e.BaseLayer = BaseLayer{data[:4], data[4:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer
+func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, _ := b.PrependBytes(4)
+ bytes[0] = e.Version
+ bytes[1] = byte(e.Type)
+ binary.BigEndian.PutUint16(bytes[2:], e.Length)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAPOL) CanDecode() gopacket.LayerClass {
+ return LayerTypeEAPOL
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAPOL) NextLayerType() gopacket.LayerType {
+ return e.Type.LayerType()
+}
+
+func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error {
+ e := &EAPOL{}
+ return decodingLayerDecoder(e, data, p)
+}
+
+// EAPOLKeyDescriptorType is an enumeration of key descriptor types
+// as specified by 802.1x in the EAPOL-Key frame
+type EAPOLKeyDescriptorType uint8
+
+// Enumeration of EAPOLKeyDescriptorType
+const (
+ EAPOLKeyDescriptorTypeRC4 EAPOLKeyDescriptorType = 1
+ EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2
+ EAPOLKeyDescriptorTypeWPA EAPOLKeyDescriptorType = 254
+)
+
+func (kdt EAPOLKeyDescriptorType) String() string {
+ switch kdt {
+ case EAPOLKeyDescriptorTypeRC4:
+ return "RC4"
+ case EAPOLKeyDescriptorTypeDot11:
+ return "802.11"
+ case EAPOLKeyDescriptorTypeWPA:
+ return "WPA"
+ default:
+ return fmt.Sprintf("unknown descriptor type %d", kdt)
+ }
+}
+
+// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the
+// encryption algorithm for the key data and the authentication for the
+// message integrity code (MIC)
+type EAPOLKeyDescriptorVersion uint8
+
+// Enumeration of EAPOLKeyDescriptorVersion
+const (
+ EAPOLKeyDescriptorVersionOther EAPOLKeyDescriptorVersion = 0
+ EAPOLKeyDescriptorVersionRC4HMACMD5 EAPOLKeyDescriptorVersion = 1
+ EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2
+ EAPOLKeyDescriptorVersionAES128CMAC EAPOLKeyDescriptorVersion = 3
+)
+
+func (v EAPOLKeyDescriptorVersion) String() string {
+ switch v {
+ case EAPOLKeyDescriptorVersionOther:
+ return "Other"
+ case EAPOLKeyDescriptorVersionRC4HMACMD5:
+ return "RC4-HMAC-MD5"
+ case EAPOLKeyDescriptorVersionAESHMACSHA1:
+ return "AES-HMAC-SHA1-128"
+ case EAPOLKeyDescriptorVersionAES128CMAC:
+ return "AES-128-CMAC"
+ default:
+ return fmt.Sprintf("unknown version %d", v)
+ }
+}
+
+// EAPOLKeyType is an enumeration of key derivation types describing
+// the purpose of the keys being derived.
+type EAPOLKeyType uint8
+
+// Enumeration of EAPOLKeyType
+const (
+ EAPOLKeyTypeGroupSMK EAPOLKeyType = 0
+ EAPOLKeyTypePairwise EAPOLKeyType = 1
+)
+
+func (kt EAPOLKeyType) String() string {
+ switch kt {
+ case EAPOLKeyTypeGroupSMK:
+ return "Group/SMK"
+ case EAPOLKeyTypePairwise:
+ return "Pairwise"
+ default:
+ return fmt.Sprintf("unknown key type %d", kt)
+ }
+}
+
+// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication
+type EAPOLKey struct {
+ BaseLayer
+ KeyDescriptorType EAPOLKeyDescriptorType
+ KeyDescriptorVersion EAPOLKeyDescriptorVersion
+ KeyType EAPOLKeyType
+ KeyIndex uint8
+ Install bool
+ KeyACK bool
+ KeyMIC bool
+ Secure bool
+ MICError bool
+ Request bool
+ HasEncryptedKeyData bool
+ SMKMessage bool
+ KeyLength uint16
+ ReplayCounter uint64
+ Nonce []byte
+ IV []byte
+ RSC uint64
+ ID uint64
+ MIC []byte
+ KeyDataLength uint16
+ EncryptedKeyData []byte
+}
+
+// LayerType returns LayerTypeEAPOLKey.
+func (ek *EAPOLKey) LayerType() gopacket.LayerType {
+ return LayerTypeEAPOLKey
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ek *EAPOLKey) CanDecode() gopacket.LayerType {
+ return LayerTypeEAPOLKey
+}
+
+// NextLayerType returns layers.LayerTypeDot11InformationElement if the key
+// data exists and is unencrypted, otherwise it does not expect a next layer.
+func (ek *EAPOLKey) NextLayerType() gopacket.LayerType {
+ if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 {
+ return LayerTypeDot11InformationElement
+ }
+ return gopacket.LayerTypePayload
+}
+
+const eapolKeyFrameLen = 95
+
// DecodeFromBytes decodes the given bytes into this layer.
// The fixed EAPOL-Key descriptor body is eapolKeyFrameLen (95) bytes;
// KeyDataLength bytes of key data follow immediately after it.
func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < eapolKeyFrameLen {
		df.SetTruncated()
		return fmt.Errorf("EAPOLKey length %v too short, %v required",
			len(data), eapolKeyFrameLen)
	}

	ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0])

	// Key Information: a big-endian 16-bit bit field at offset 1.
	// Bits 0-2: descriptor version; bit 3: key type; bits 4-5: key index;
	// bits 6-13: the individual flags unpacked below.
	info := binary.BigEndian.Uint16(data[1:3])
	ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007)
	ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3)
	ek.KeyIndex = uint8((info & 0x0030) >> 4)
	ek.Install = (info & 0x0040) != 0
	ek.KeyACK = (info & 0x0080) != 0
	ek.KeyMIC = (info & 0x0100) != 0
	ek.Secure = (info & 0x0200) != 0
	ek.MICError = (info & 0x0400) != 0
	ek.Request = (info & 0x0800) != 0
	ek.HasEncryptedKeyData = (info & 0x1000) != 0
	ek.SMKMessage = (info & 0x2000) != 0

	ek.KeyLength = binary.BigEndian.Uint16(data[3:5])
	ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13])

	// Fixed-offset fields: 32-byte nonce, 16-byte IV, 8-byte RSC,
	// 8-byte ID, 16-byte MIC. These slices alias the input buffer.
	ek.Nonce = data[13:45]
	ek.IV = data[45:61]
	ek.RSC = binary.BigEndian.Uint64(data[61:69])
	ek.ID = binary.BigEndian.Uint64(data[69:77])
	ek.MIC = data[77:93]

	ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95])

	// Ensure the advertised key data actually fits in the buffer before
	// slicing it out.
	totalLength := eapolKeyFrameLen + int(ek.KeyDataLength)
	if len(data) < totalLength {
		df.SetTruncated()
		return fmt.Errorf("EAPOLKey data length %d too short, %d required",
			len(data)-eapolKeyFrameLen, ek.KeyDataLength)
	}

	if ek.HasEncryptedKeyData {
		// Encrypted key data is captured here; unencrypted key data is
		// instead left in the payload for the next layer (see NextLayerType).
		ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength]
		ek.BaseLayer = BaseLayer{
			Contents: data[:totalLength],
			Payload: data[totalLength:],
		}
	} else {
		ek.BaseLayer = BaseLayer{
			Contents: data[:eapolKeyFrameLen],
			Payload: data[eapolKeyFrameLen:],
		}
	}

	return nil
}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
// Field offsets mirror DecodeFromBytes: 95 fixed bytes, then any
// encrypted key data.
func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData))
	if err != nil {
		return err
	}

	buf[0] = byte(ek.KeyDescriptorType)

	// Pack the 16-bit Key Information bit field (inverse of the unpacking
	// performed in DecodeFromBytes).
	var info uint16
	info |= uint16(ek.KeyDescriptorVersion)
	info |= uint16(ek.KeyType) << 3
	info |= uint16(ek.KeyIndex) << 4
	if ek.Install {
		info |= 0x0040
	}
	if ek.KeyACK {
		info |= 0x0080
	}
	if ek.KeyMIC {
		info |= 0x0100
	}
	if ek.Secure {
		info |= 0x0200
	}
	if ek.MICError {
		info |= 0x0400
	}
	if ek.Request {
		info |= 0x0800
	}
	if ek.HasEncryptedKeyData {
		info |= 0x1000
	}
	if ek.SMKMessage {
		info |= 0x2000
	}
	binary.BigEndian.PutUint16(buf[1:3], info)

	binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength)
	binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter)

	// Fixed-offset fields: 32-byte nonce, 16-byte IV, RSC, ID, 16-byte MIC.
	copy(buf[13:45], ek.Nonce)
	copy(buf[45:61], ek.IV)
	binary.BigEndian.PutUint64(buf[61:69], ek.RSC)
	binary.BigEndian.PutUint64(buf[69:77], ek.ID)
	copy(buf[77:93], ek.MIC)

	binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength)
	if len(ek.EncryptedKeyData) > 0 {
		copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData)
	}

	return nil
}
+
+func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error {
+ ek := &EAPOLKey{}
+ return decodingLayerDecoder(ek, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/google/gopacket/layers/endpoints.go
new file mode 100644
index 0000000..4c91cc3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/endpoints.go
@@ -0,0 +1,97 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+ "net"
+ "strconv"
+)
+
+var (
+ // We use two different endpoint types for IPv4 vs IPv6 addresses, so that
+ // ordering with endpointA.LessThan(endpointB) sanely groups all IPv4
+ // addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses.
+ EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string {
+ return net.IP(b).String()
+ }})
+ EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string {
+ return net.IP(b).String()
+ }})
+
+ EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string {
+ return net.HardwareAddr(b).String()
+ }})
+ EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(b[0]))
+ }})
+ EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string {
+ return "point"
+ }})
+)
+
+// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address.
+// It returns gopacket.InvalidEndpoint if the IP address is invalid.
+func NewIPEndpoint(a net.IP) gopacket.Endpoint {
+ ipv4 := a.To4()
+ if ipv4 != nil {
+ return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4))
+ }
+
+ ipv6 := a.To16()
+ if ipv6 != nil {
+ return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6))
+ }
+
+ return gopacket.InvalidEndpoint
+}
+
+// NewMACEndpoint returns a new MAC address endpoint.
+func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointMAC, []byte(a))
+}
+func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint {
+ return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)})
+}
+
+// NewTCPPortEndpoint returns an endpoint based on a TCP port.
+func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointTCPPort, uint16(p))
+}
+
+// NewUDPPortEndpoint returns an endpoint based on a UDP port.
+func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointUDPPort, uint16(p))
+}
+
+// NewSCTPPortEndpoint returns an endpoint based on a SCTP port.
+func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointSCTPPort, uint16(p))
+}
+
+// NewRUDPPortEndpoint returns an endpoint based on a RUDP port.
+func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)})
+}
+
+// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port.
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointUDPLitePort, uint16(p))
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/google/gopacket/layers/enums.go
new file mode 100644
index 0000000..fa443e6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums.go
@@ -0,0 +1,448 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+
+ "github.com/google/gopacket"
+)
+
+// EnumMetadata keeps track of a set of metadata for each enumeration value
+// for protocol enumerations.
+type EnumMetadata struct {
+ // DecodeWith is the decoder to use to decode this protocol's data.
+ DecodeWith gopacket.Decoder
+ // Name is the name of the enumeration value.
+ Name string
+ // LayerType is the layer type implied by the given enum.
+ LayerType gopacket.LayerType
+}
+
+// errorFunc returns a decoder that spits out a specific error message.
+func errorFunc(msg string) gopacket.Decoder {
+ var e = errors.New(msg)
+ return gopacket.DecodeFunc(func([]byte, gopacket.PacketBuilder) error {
+ return e
+ })
+}
+
+// EthernetType is an enumeration of ethernet type values, and acts as a decoder
+// for any type it supports.
+type EthernetType uint16
+
+const (
+ // EthernetTypeLLC is not an actual ethernet type. It is instead a
+ // placeholder we use in Ethernet frames that use the 802.3 standard of
+ // srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype.
+ EthernetTypeLLC EthernetType = 0
+ EthernetTypeIPv4 EthernetType = 0x0800
+ EthernetTypeARP EthernetType = 0x0806
+ EthernetTypeIPv6 EthernetType = 0x86DD
+ EthernetTypeCiscoDiscovery EthernetType = 0x2000
+ EthernetTypeNortelDiscovery EthernetType = 0x01a2
+ EthernetTypeTransparentEthernetBridging EthernetType = 0x6558
+ EthernetTypeDot1Q EthernetType = 0x8100
+ EthernetTypePPP EthernetType = 0x880b
+ EthernetTypePPPoEDiscovery EthernetType = 0x8863
+ EthernetTypePPPoESession EthernetType = 0x8864
+ EthernetTypeMPLSUnicast EthernetType = 0x8847
+ EthernetTypeMPLSMulticast EthernetType = 0x8848
+ EthernetTypeEAPOL EthernetType = 0x888e
+ EthernetTypeQinQ EthernetType = 0x88a8
+ EthernetTypeLinkLayerDiscovery EthernetType = 0x88cc
+ EthernetTypeEthernetCTP EthernetType = 0x9000
+)
+
+// IPProtocol is an enumeration of IP protocol values, and acts as a decoder
+// for any type it supports.
+type IPProtocol uint8
+
+const (
+ IPProtocolIPv6HopByHop IPProtocol = 0
+ IPProtocolICMPv4 IPProtocol = 1
+ IPProtocolIGMP IPProtocol = 2
+ IPProtocolIPv4 IPProtocol = 4
+ IPProtocolTCP IPProtocol = 6
+ IPProtocolUDP IPProtocol = 17
+ IPProtocolRUDP IPProtocol = 27
+ IPProtocolIPv6 IPProtocol = 41
+ IPProtocolIPv6Routing IPProtocol = 43
+ IPProtocolIPv6Fragment IPProtocol = 44
+ IPProtocolGRE IPProtocol = 47
+ IPProtocolESP IPProtocol = 50
+ IPProtocolAH IPProtocol = 51
+ IPProtocolICMPv6 IPProtocol = 58
+ IPProtocolNoNextHeader IPProtocol = 59
+ IPProtocolIPv6Destination IPProtocol = 60
+ IPProtocolOSPF IPProtocol = 89
+ IPProtocolIPIP IPProtocol = 94
+ IPProtocolEtherIP IPProtocol = 97
+ IPProtocolVRRP IPProtocol = 112
+ IPProtocolSCTP IPProtocol = 132
+ IPProtocolUDPLite IPProtocol = 136
+ IPProtocolMPLSInIP IPProtocol = 137
+)
+
+// LinkType is an enumeration of link types, and acts as a decoder for any
+// link type it supports.
+type LinkType uint8
+
+const (
+ // According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html
+ LinkTypeNull LinkType = 0
+ LinkTypeEthernet LinkType = 1
+ LinkTypeAX25 LinkType = 3
+ LinkTypeTokenRing LinkType = 6
+ LinkTypeArcNet LinkType = 7
+ LinkTypeSLIP LinkType = 8
+ LinkTypePPP LinkType = 9
+ LinkTypeFDDI LinkType = 10
+ LinkTypePPP_HDLC LinkType = 50
+ LinkTypePPPEthernet LinkType = 51
+ LinkTypeATM_RFC1483 LinkType = 100
+ LinkTypeRaw LinkType = 101
+ LinkTypeC_HDLC LinkType = 104
+ LinkTypeIEEE802_11 LinkType = 105
+ LinkTypeFRelay LinkType = 107
+ LinkTypeLoop LinkType = 108
+ LinkTypeLinuxSLL LinkType = 113
+ LinkTypeLTalk LinkType = 114
+ LinkTypePFLog LinkType = 117
+ LinkTypePrismHeader LinkType = 119
+ LinkTypeIPOverFC LinkType = 122
+ LinkTypeSunATM LinkType = 123
+ LinkTypeIEEE80211Radio LinkType = 127
+ LinkTypeARCNetLinux LinkType = 129
+ LinkTypeIPOver1394 LinkType = 138
+ LinkTypeMTP2Phdr LinkType = 139
+ LinkTypeMTP2 LinkType = 140
+ LinkTypeMTP3 LinkType = 141
+ LinkTypeSCCP LinkType = 142
+ LinkTypeDOCSIS LinkType = 143
+ LinkTypeLinuxIRDA LinkType = 144
+ LinkTypeLinuxLAPD LinkType = 177
+ LinkTypeLinuxUSB LinkType = 220
+ LinkTypeIPv4 LinkType = 228
+ LinkTypeIPv6 LinkType = 229
+)
+
+// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516
+type PPPoECode uint8
+
+const (
+ PPPoECodePADI PPPoECode = 0x09
+ PPPoECodePADO PPPoECode = 0x07
+ PPPoECodePADR PPPoECode = 0x19
+ PPPoECodePADS PPPoECode = 0x65
+ PPPoECodePADT PPPoECode = 0xA7
+ PPPoECodeSession PPPoECode = 0x00
+)
+
+// PPPType is an enumeration of PPP type values, and acts as a decoder for any
+// type it supports.
+type PPPType uint16
+
+const (
+ PPPTypeIPv4 PPPType = 0x0021
+ PPPTypeIPv6 PPPType = 0x0057
+ PPPTypeMPLSUnicast PPPType = 0x0281
+ PPPTypeMPLSMulticast PPPType = 0x0283
+)
+
+// SCTPChunkType is an enumeration of chunk types inside SCTP packets.
+type SCTPChunkType uint8
+
+const (
+ SCTPChunkTypeData SCTPChunkType = 0
+ SCTPChunkTypeInit SCTPChunkType = 1
+ SCTPChunkTypeInitAck SCTPChunkType = 2
+ SCTPChunkTypeSack SCTPChunkType = 3
+ SCTPChunkTypeHeartbeat SCTPChunkType = 4
+ SCTPChunkTypeHeartbeatAck SCTPChunkType = 5
+ SCTPChunkTypeAbort SCTPChunkType = 6
+ SCTPChunkTypeShutdown SCTPChunkType = 7
+ SCTPChunkTypeShutdownAck SCTPChunkType = 8
+ SCTPChunkTypeError SCTPChunkType = 9
+ SCTPChunkTypeCookieEcho SCTPChunkType = 10
+ SCTPChunkTypeCookieAck SCTPChunkType = 11
+ SCTPChunkTypeShutdownComplete SCTPChunkType = 14
+)
+
+// FDDIFrameControl is an enumeration of FDDI frame control bytes.
+type FDDIFrameControl uint8
+
+const (
+ FDDIFrameControlLLC FDDIFrameControl = 0x50
+)
+
+// EAPOLType is an enumeration of EAPOL packet types.
+type EAPOLType uint8
+
+const (
+ EAPOLTypeEAP EAPOLType = 0
+ EAPOLTypeStart EAPOLType = 1
+ EAPOLTypeLogOff EAPOLType = 2
+ EAPOLTypeKey EAPOLType = 3
+ EAPOLTypeASFAlert EAPOLType = 4
+)
+
+// ProtocolFamily is the set of values defined as PF_* in sys/socket.h
+type ProtocolFamily uint8
+
+const (
+ ProtocolFamilyIPv4 ProtocolFamily = 2
+ // BSDs use different values for INET6... glory be. These values taken from
+ // tcpdump 4.3.0.
+ ProtocolFamilyIPv6BSD ProtocolFamily = 24
+ ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28
+ ProtocolFamilyIPv6Darwin ProtocolFamily = 30
+ ProtocolFamilyIPv6Linux ProtocolFamily = 10
+)
+
+// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields.
+// By combining these two fields together into a single type, we're able to
+// provide a String function that correctly displays the subtype given the
+// top-level type.
+//
+// If you just care about the top-level type, use the MainType function.
+type Dot11Type uint8
+
+// MainType strips the subtype information from the given type,
+// returning just the overarching type (Mgmt, Ctrl, Data, Reserved).
+func (d Dot11Type) MainType() Dot11Type {
+ return d & dot11TypeMask
+}
+
+func (d Dot11Type) QOS() bool {
+ return d&dot11QOSMask == Dot11TypeDataQOSData
+}
+
+const (
+ Dot11TypeMgmt Dot11Type = 0x00
+ Dot11TypeCtrl Dot11Type = 0x01
+ Dot11TypeData Dot11Type = 0x02
+ Dot11TypeReserved Dot11Type = 0x03
+ dot11TypeMask = 0x03
+ dot11QOSMask = 0x23
+
+ // The following are type/subtype conglomerations.
+
+ // Management
+ Dot11TypeMgmtAssociationReq Dot11Type = 0x00
+ Dot11TypeMgmtAssociationResp Dot11Type = 0x04
+ Dot11TypeMgmtReassociationReq Dot11Type = 0x08
+ Dot11TypeMgmtReassociationResp Dot11Type = 0x0c
+ Dot11TypeMgmtProbeReq Dot11Type = 0x10
+ Dot11TypeMgmtProbeResp Dot11Type = 0x14
+ Dot11TypeMgmtMeasurementPilot Dot11Type = 0x18
+ Dot11TypeMgmtBeacon Dot11Type = 0x20
+ Dot11TypeMgmtATIM Dot11Type = 0x24
+ Dot11TypeMgmtDisassociation Dot11Type = 0x28
+ Dot11TypeMgmtAuthentication Dot11Type = 0x2c
+ Dot11TypeMgmtDeauthentication Dot11Type = 0x30
+ Dot11TypeMgmtAction Dot11Type = 0x34
+ Dot11TypeMgmtActionNoAck Dot11Type = 0x38
+
+ // Control
+ Dot11TypeCtrlWrapper Dot11Type = 0x1d
+ Dot11TypeCtrlBlockAckReq Dot11Type = 0x21
+ Dot11TypeCtrlBlockAck Dot11Type = 0x25
+ Dot11TypeCtrlPowersavePoll Dot11Type = 0x29
+ Dot11TypeCtrlRTS Dot11Type = 0x2d
+ Dot11TypeCtrlCTS Dot11Type = 0x31
+ Dot11TypeCtrlAck Dot11Type = 0x35
+ Dot11TypeCtrlCFEnd Dot11Type = 0x39
+ Dot11TypeCtrlCFEndAck Dot11Type = 0x3d
+
+ // Data
+ Dot11TypeDataCFAck Dot11Type = 0x06
+ Dot11TypeDataCFPoll Dot11Type = 0x0a
+ Dot11TypeDataCFAckPoll Dot11Type = 0x0e
+ Dot11TypeDataNull Dot11Type = 0x12
+ Dot11TypeDataCFAckNoData Dot11Type = 0x16
+ Dot11TypeDataCFPollNoData Dot11Type = 0x1a
+ Dot11TypeDataCFAckPollNoData Dot11Type = 0x1e
+ Dot11TypeDataQOSData Dot11Type = 0x22
+ Dot11TypeDataQOSDataCFAck Dot11Type = 0x26
+ Dot11TypeDataQOSDataCFPoll Dot11Type = 0x2a
+ Dot11TypeDataQOSDataCFAckPoll Dot11Type = 0x2e
+ Dot11TypeDataQOSNull Dot11Type = 0x32
+ Dot11TypeDataQOSCFPollNoData Dot11Type = 0x3a
+ Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e
+)
+
+// Decode a raw v4 or v6 IP packet.
+func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error {
+ version := data[0] >> 4
+ switch version {
+ case 4:
+ return decodeIPv4(data, p)
+ case 6:
+ return decodeIPv6(data, p)
+ }
+ return fmt.Errorf("Invalid IP packet version %v", version)
+}
+
+func initActualTypeData() {
+ // Each of the XXXTypeMetadata arrays contains mappings of how to handle enum
+ // values for various enum types in gopacket/layers.
+ // These arrays are actually created by gen2.go and stored in
+ // enums_generated.go.
+ //
+ // So, EthernetTypeMetadata[2] contains information on how to handle EthernetType
+ // 2, including which name to give it and which decoder to use to decode
+ // packet data of that type. These arrays are filled by default with all of the
+ // protocols gopacket/layers knows how to handle, but users of the library can
+ // add new decoders or override existing ones. For example, if you write a better
+ // TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith
+ // with your new decoder, and all gopacket/layers decoding will use your new
+ // decoder whenever they encounter that IPProtocol.
+
+ // Here we link up all enumerations with their respective names and decoders.
+ EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC}
+ EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP}
+ EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+ EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP}
+ EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE}
+ EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE}
+ EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP}
+ EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery}
+ EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery}
+ EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery}
+ EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS}
+ EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS}
+ EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL}
+ EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+ EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet}
+
+ IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP}
+ IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: "UDP", LayerType: LayerTypeUDP}
+ IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4}
+ IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6}
+ IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP}
+ IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP}
+ IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP}
+ IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE}
+ IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop}
+ IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing}
+ IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment}
+ IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination}
+ IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF}
+ IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH}
+ IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP}
+ IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite}
+ IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS}
+ IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload}
+ IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP}
+ IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP}
+
+ SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"}
+
+ PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"}
+ PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"}
+ PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"}
+ PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"}
+
+ PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+
+ LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"}
+ LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+ LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"}
+ LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"}
+ LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "Dot11"}
+ LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"}
+ LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "802.11"}
+ LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ // See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85
+ // Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508
+ if runtime.GOOS == "openbsd" {
+ LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ } else {
+ LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ }
+ LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"}
+ LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"}
+ LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"}
+ LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux SLL"}
+ LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"}
+
+ FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"}
+
+ EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP}
+ EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey}
+
+ ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+
+ Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq}
+ Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp}
+ Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq}
+ Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp}
+ Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq}
+ Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp}
+ Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot}
+ Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon}
+ Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM}
+ Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation}
+ Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication}
+ Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: "MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication}
+ Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction}
+ Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck}
+ Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl}
+ Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl}
+ Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq}
+ Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck}
+ Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll}
+ Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS}
+ Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS}
+ Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck}
+ Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd}
+ Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck}
+ Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data}
+ Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck}
+ Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll}
+ Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll}
+ Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull}
+ Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData}
+ Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll}
+ Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull}
+ Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData}
+
+ USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt}
+ USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl}
+ USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk}
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/google/gopacket/layers/enums_generated.go
new file mode 100644
index 0000000..bf77aac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums_generated.go
@@ -0,0 +1,434 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033
+
+import (
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+func init() {
+ initUnknownTypesForLinkType()
+ initUnknownTypesForEthernetType()
+ initUnknownTypesForPPPType()
+ initUnknownTypesForIPProtocol()
+ initUnknownTypesForSCTPChunkType()
+ initUnknownTypesForPPPoECode()
+ initUnknownTypesForFDDIFrameControl()
+ initUnknownTypesForEAPOLType()
+ initUnknownTypesForProtocolFamily()
+ initUnknownTypesForDot11Type()
+ initUnknownTypesForUSBTransportType()
+ initActualTypeData()
+}
+
+// Decoder calls LinkTypeMetadata.DecodeWith's decoder.
+func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return LinkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns LinkTypeMetadata.Name.
+func (a LinkType) String() string {
+ return LinkTypeMetadata[a].Name
+}
+
+// LayerType returns LinkTypeMetadata.LayerType.
+func (a LinkType) LayerType() gopacket.LayerType {
+ return LinkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForLinkType int
+
+func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForLinkType) Error() string {
+ return fmt.Sprintf("Unable to decode LinkType %d", int(*a))
+}
+
+var errorDecodersForLinkType [256]errorDecoderForLinkType
+var LinkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForLinkType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForLinkType[i] = errorDecoderForLinkType(i)
+ LinkTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForLinkType[i],
+ Name: "UnknownLinkType",
+ }
+ }
+}
+
+// Decoder calls EthernetTypeMetadata.DecodeWith's decoder.
+func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return EthernetTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EthernetTypeMetadata.Name.
+func (a EthernetType) String() string {
+ return EthernetTypeMetadata[a].Name
+}
+
+// LayerType returns EthernetTypeMetadata.LayerType.
+func (a EthernetType) LayerType() gopacket.LayerType {
+ return EthernetTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEthernetType int
+
+func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForEthernetType) Error() string {
+ return fmt.Sprintf("Unable to decode EthernetType %d", int(*a))
+}
+
+var errorDecodersForEthernetType [65536]errorDecoderForEthernetType
+var EthernetTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForEthernetType() {
+ for i := 0; i < 65536; i++ {
+ errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i)
+ EthernetTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForEthernetType[i],
+ Name: "UnknownEthernetType",
+ }
+ }
+}
+
+// Decoder calls PPPTypeMetadata.DecodeWith's decoder.
+func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return PPPTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPTypeMetadata.Name.
+func (a PPPType) String() string {
+ return PPPTypeMetadata[a].Name
+}
+
+// LayerType returns PPPTypeMetadata.LayerType.
+func (a PPPType) LayerType() gopacket.LayerType {
+ return PPPTypeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPType int
+
+func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForPPPType) Error() string {
+ return fmt.Sprintf("Unable to decode PPPType %d", int(*a))
+}
+
+var errorDecodersForPPPType [65536]errorDecoderForPPPType
+var PPPTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForPPPType() {
+ for i := 0; i < 65536; i++ {
+ errorDecodersForPPPType[i] = errorDecoderForPPPType(i)
+ PPPTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForPPPType[i],
+ Name: "UnknownPPPType",
+ }
+ }
+}
+
+// Decoder calls IPProtocolMetadata.DecodeWith's decoder.
+func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return IPProtocolMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns IPProtocolMetadata.Name.
+func (a IPProtocol) String() string {
+ return IPProtocolMetadata[a].Name
+}
+
+// LayerType returns IPProtocolMetadata.LayerType.
+func (a IPProtocol) LayerType() gopacket.LayerType {
+ return IPProtocolMetadata[a].LayerType
+}
+
+type errorDecoderForIPProtocol int
+
+func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForIPProtocol) Error() string {
+ return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a))
+}
+
+var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol
+var IPProtocolMetadata [256]EnumMetadata
+
+func initUnknownTypesForIPProtocol() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i)
+ IPProtocolMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForIPProtocol[i],
+ Name: "UnknownIPProtocol",
+ }
+ }
+}
+
+// Decoder calls SCTPChunkTypeMetadata.DecodeWith's decoder.
+func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns SCTPChunkTypeMetadata.Name.
+func (a SCTPChunkType) String() string {
+ return SCTPChunkTypeMetadata[a].Name
+}
+
+// LayerType returns SCTPChunkTypeMetadata.LayerType.
+func (a SCTPChunkType) LayerType() gopacket.LayerType {
+ return SCTPChunkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForSCTPChunkType int
+
+func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForSCTPChunkType) Error() string {
+ return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a))
+}
+
+var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType
+var SCTPChunkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForSCTPChunkType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i)
+ SCTPChunkTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForSCTPChunkType[i],
+ Name: "UnknownSCTPChunkType",
+ }
+ }
+}
+
+// Decoder calls PPPoECodeMetadata.DecodeWith's decoder.
+func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return PPPoECodeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPoECodeMetadata.Name.
+func (a PPPoECode) String() string {
+ return PPPoECodeMetadata[a].Name
+}
+
+// LayerType returns PPPoECodeMetadata.LayerType.
+func (a PPPoECode) LayerType() gopacket.LayerType {
+ return PPPoECodeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPoECode int
+
+func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForPPPoECode) Error() string {
+ return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a))
+}
+
+var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode
+var PPPoECodeMetadata [256]EnumMetadata
+
+func initUnknownTypesForPPPoECode() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i)
+ PPPoECodeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForPPPoECode[i],
+ Name: "UnknownPPPoECode",
+ }
+ }
+}
+
+// Decoder calls FDDIFrameControlMetadata.DecodeWith's decoder.
+func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns FDDIFrameControlMetadata.Name.
+func (a FDDIFrameControl) String() string {
+ return FDDIFrameControlMetadata[a].Name
+}
+
+// LayerType returns FDDIFrameControlMetadata.LayerType.
+func (a FDDIFrameControl) LayerType() gopacket.LayerType {
+ return FDDIFrameControlMetadata[a].LayerType
+}
+
+type errorDecoderForFDDIFrameControl int
+
+func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForFDDIFrameControl) Error() string {
+ return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a))
+}
+
+var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl
+var FDDIFrameControlMetadata [256]EnumMetadata
+
+func initUnknownTypesForFDDIFrameControl() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i)
+ FDDIFrameControlMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForFDDIFrameControl[i],
+ Name: "UnknownFDDIFrameControl",
+ }
+ }
+}
+
+// Decoder calls EAPOLTypeMetadata.DecodeWith's decoder.
+func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EAPOLTypeMetadata.Name.
+func (a EAPOLType) String() string {
+ return EAPOLTypeMetadata[a].Name
+}
+
+// LayerType returns EAPOLTypeMetadata.LayerType.
+func (a EAPOLType) LayerType() gopacket.LayerType {
+ return EAPOLTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEAPOLType int
+
+func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForEAPOLType) Error() string {
+ return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a))
+}
+
+var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType
+var EAPOLTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForEAPOLType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i)
+ EAPOLTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForEAPOLType[i],
+ Name: "UnknownEAPOLType",
+ }
+ }
+}
+
+// Decoder calls ProtocolFamilyMetadata.DecodeWith's decoder.
+func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns ProtocolFamilyMetadata.Name.
+func (a ProtocolFamily) String() string {
+ return ProtocolFamilyMetadata[a].Name
+}
+
+// LayerType returns ProtocolFamilyMetadata.LayerType.
+func (a ProtocolFamily) LayerType() gopacket.LayerType {
+ return ProtocolFamilyMetadata[a].LayerType
+}
+
+type errorDecoderForProtocolFamily int
+
+func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForProtocolFamily) Error() string {
+ return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a))
+}
+
+var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily
+var ProtocolFamilyMetadata [256]EnumMetadata
+
+func initUnknownTypesForProtocolFamily() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i)
+ ProtocolFamilyMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForProtocolFamily[i],
+ Name: "UnknownProtocolFamily",
+ }
+ }
+}
+
+// Decoder calls Dot11TypeMetadata.DecodeWith's decoder.
+func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return Dot11TypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns Dot11TypeMetadata.Name.
+func (a Dot11Type) String() string {
+ return Dot11TypeMetadata[a].Name
+}
+
+// LayerType returns Dot11TypeMetadata.LayerType.
+func (a Dot11Type) LayerType() gopacket.LayerType {
+ return Dot11TypeMetadata[a].LayerType
+}
+
+type errorDecoderForDot11Type int
+
+func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForDot11Type) Error() string {
+ return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a))
+}
+
+var errorDecodersForDot11Type [256]errorDecoderForDot11Type
+var Dot11TypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForDot11Type() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i)
+ Dot11TypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForDot11Type[i],
+ Name: "UnknownDot11Type",
+ }
+ }
+}
+
+// Decoder calls USBTransportTypeMetadata.DecodeWith's decoder.
+func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns USBTransportTypeMetadata.Name.
+func (a USBTransportType) String() string {
+ return USBTransportTypeMetadata[a].Name
+}
+
+// LayerType returns USBTransportTypeMetadata.LayerType.
+func (a USBTransportType) LayerType() gopacket.LayerType {
+ return USBTransportTypeMetadata[a].LayerType
+}
+
+type errorDecoderForUSBTransportType int
+
+func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForUSBTransportType) Error() string {
+ return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a))
+}
+
+var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType
+var USBTransportTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForUSBTransportType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i)
+ USBTransportTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForUSBTransportType[i],
+ Name: "UnknownUSBTransportType",
+ }
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/google/gopacket/layers/etherip.go
new file mode 100644
index 0000000..5b7b722
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/etherip.go
@@ -0,0 +1,45 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// EtherIP is the struct for storing RFC 3378 EtherIP packet headers.
+type EtherIP struct {
+ BaseLayer
+ Version uint8 // upper 4 bits of the first header byte
+ Reserved uint16 // the remaining 12 bits of the 2-byte header, reserved by RFC 3378
+}
+
+// LayerType returns LayerTypeEtherIP.
+func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+//
+// The EtherIP header is two bytes — a 4-bit version plus 12 reserved
+// bits — and everything after it is the encapsulated payload.
+func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // Guard the 2-byte header read; the previous code panicked on short input.
+ if len(data) < 2 {
+  df.SetTruncated()
+  return errors.New("EtherIP packet too small")
+ }
+ e.Version = data[0] >> 4
+ e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff
+ e.BaseLayer = BaseLayer{data[:2], data[2:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EtherIP) CanDecode() gopacket.LayerClass {
+ return LayerTypeEtherIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer:
+// EtherIP always encapsulates an Ethernet frame.
+func (e *EtherIP) NextLayerType() gopacket.LayerType {
+ return LayerTypeEthernet
+}
+
+// decodeEtherIP is the gopacket decoder hook for EtherIP; it delegates to
+// DecodeFromBytes via the generic decodingLayerDecoder helper.
+func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error {
+ e := &EtherIP{}
+ return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/google/gopacket/layers/ethernet.go
new file mode 100644
index 0000000..b73748f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ethernet.go
@@ -0,0 +1,123 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/google/gopacket"
+ "net"
+)
+
+// EthernetBroadcast is the broadcast MAC address used by Ethernet
+// (ff:ff:ff:ff:ff:ff).
+var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// Ethernet is the layer for Ethernet frame headers.
+type Ethernet struct {
+ BaseLayer
+ SrcMAC, DstMAC net.HardwareAddr
+ EthernetType EthernetType
+ // Length is only set if a length field exists within this header. Ethernet
+ // headers follow two different standards, one that uses an EthernetType, the
+ // other which defines a length that follows with a LLC header (802.3). If the
+ // former is the case, we set EthernetType and Length stays 0. In the latter
+ // case, we set Length and EthernetType = EthernetTypeLLC.
+ Length uint16
+}
+
+// LayerType returns LayerTypeEthernet.
+func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet }
+
+// LinkFlow returns a new flow of type EndpointMAC from SrcMAC to DstMAC.
+func (e *Ethernet) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer, filling in the
+// MAC addresses and either the EthernetType (DIX) or Length (802.3) form of
+// the type/length field.
+func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 14 {
+  return errors.New("Ethernet packet too small")
+ }
+ eth.DstMAC = net.HardwareAddr(data[0:6])
+ eth.SrcMAC = net.HardwareAddr(data[6:12])
+ eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14]))
+ eth.BaseLayer = BaseLayer{data[:14], data[14:]}
+ eth.Length = 0
+ // Values below 0x0600 in the type/length field are 802.3 payload lengths,
+ // not EtherTypes: record the length and treat the payload as LLC.
+ if eth.EthernetType < 0x0600 {
+  eth.Length = uint16(eth.EthernetType)
+  eth.EthernetType = EthernetTypeLLC
+  if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 {
+   df.SetTruncated()
+  } else if cmp > 0 {
+   // Strip off bytes at the end, since we have too many bytes
+   eth.Payload = eth.Payload[:len(eth.Payload)-cmp]
+  }
+  // fmt.Println(eth)
+ }
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+//
+// With opts.FixLengths set, an LLC frame's Length field is recomputed from
+// the current payload. The result is padded to the 60-byte minimum.
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if len(eth.DstMAC) != 6 {
+  return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC)
+ }
+ if len(eth.SrcMAC) != 6 {
+  return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC)
+ }
+ payload := b.Bytes()
+ bytes, err := b.PrependBytes(14)
+ if err != nil {
+  return err
+ }
+ copy(bytes, eth.DstMAC)
+ copy(bytes[6:], eth.SrcMAC)
+ // A non-zero Length (or an explicit LLC type) selects the 802.3 length
+ // form of bytes 12-13; otherwise the DIX EtherType form is written.
+ if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC {
+  if opts.FixLengths {
+   eth.Length = uint16(len(payload))
+  }
+  if eth.EthernetType != EthernetTypeLLC {
+   return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length)
+  } else if eth.Length > 0x0600 {
+   // NOTE(review): lengths are only meaningful below 0x0600, yet this
+   // check admits exactly 0x0600 — confirm whether it should be >=.
+   return fmt.Errorf("invalid ethernet length %v", eth.Length)
+  }
+  binary.BigEndian.PutUint16(bytes[12:], eth.Length)
+ } else {
+  binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType))
+ }
+ length := len(b.Bytes())
+ if length < 60 {
+  // Pad out to 60 bytes.
+  padding, err := b.AppendBytes(60 - length)
+  if err != nil {
+   return err
+  }
+  copy(padding, lotsOfZeros[:])
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (eth *Ethernet) CanDecode() gopacket.LayerClass {
+ return LayerTypeEthernet
+}
+
+// NextLayerType returns the layer type of the payload, as selected by the
+// frame's EthernetType.
+func (eth *Ethernet) NextLayerType() gopacket.LayerType {
+ return eth.EthernetType.LayerType()
+}
+
+// decodeEthernet is the gopacket decoder hook for Ethernet frames: it decodes
+// the header, registers the layer as the packet's link layer, and hands off
+// to the decoder selected by EthernetType.
+func decodeEthernet(data []byte, p gopacket.PacketBuilder) error {
+ eth := &Ethernet{}
+ err := eth.DecodeFromBytes(data, p)
+ if err != nil {
+  return err
+ }
+ p.AddLayer(eth)
+ p.SetLinkLayer(eth)
+ return p.NextDecoder(eth.EthernetType)
+}
diff --git a/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/google/gopacket/layers/fddi.go
new file mode 100644
index 0000000..ed9e195
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/fddi.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+ "net"
+)
+
+// FDDI contains the header for FDDI frames.
+type FDDI struct {
+ BaseLayer
+ FrameControl FDDIFrameControl // top 5 bits of the first header byte
+ Priority uint8 // low 3 bits of the first header byte
+ SrcMAC, DstMAC net.HardwareAddr
+}
+
+// LayerType returns LayerTypeFDDI.
+func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (f *FDDI) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC)
+}
+
+// decodeFDDI is the gopacket decoder hook for FDDI frames. It splits the
+// 13-byte header (frame control + two MAC addresses) from the payload and
+// hands off to the decoder selected by FrameControl.
+//
+// NOTE(review): there is no bounds check, so a frame shorter than 13 bytes
+// panics instead of reporting truncation — confirm callers guarantee length.
+// NOTE(review): data[1:7] is stored as SrcMAC and data[7:13] as DstMAC; FDDI
+// framing normally carries the destination address first — verify against
+// the capture format before relying on these fields.
+func decodeFDDI(data []byte, p gopacket.PacketBuilder) error {
+ f := &FDDI{
+  FrameControl: FDDIFrameControl(data[0] & 0xF8),
+  Priority: data[0] & 0x07,
+  SrcMAC: net.HardwareAddr(data[1:7]),
+  DstMAC: net.HardwareAddr(data[7:13]),
+  BaseLayer: BaseLayer{data[:13], data[13:]},
+ }
+ p.SetLinkLayer(f)
+ p.AddLayer(f)
+ return p.NextDecoder(f.FrameControl)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gen.go b/vendor/github.com/google/gopacket/layers/gen.go
new file mode 100644
index 0000000..ab7a0c0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This binary pulls known ports from IANA, and uses them to populate
+// iana_ports.go's TCPPortNames and UDPPortNames maps.
+//
+// go run gen.go | gofmt > iana_ports.go
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+)
+
+// fmtString is the gofmt-able skeleton of the generated iana_ports.go file:
+// the %s/%q verbs receive the generation time, the source URL, and the three
+// (tcp, udp, sctp) map bodies, in that order.
+const fmtString = `// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at %s
+// Fetched from %q
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
+var tcpPortNames = map[TCPPort]string{
+%s}
+var udpPortNames = map[UDPPort]string{
+%s}
+var sctpPortNames = map[SCTPPort]string{
+%s}
+`
+
+// url is the IANA service-names registry to scrape; override with -url.
+var url = flag.String("url", "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml", "URL to grab port numbers from")
+
+// main fetches the IANA service-names registry, extracts the first service
+// name registered for each (protocol, port) pair, and writes the generated
+// iana_ports.go source to stdout (progress messages go to stderr). Pipe the
+// output through gofmt as described in the file comment.
+func main() {
+ // The -url flag was previously defined but never parsed, so it was
+ // silently ignored.
+ flag.Parse()
+ fmt.Fprintf(os.Stderr, "Fetching ports from %q\n", *url)
+ resp, err := http.Get(*url)
+ if err != nil {
+  panic(err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+  panic(err)
+ }
+ fmt.Fprintln(os.Stderr, "Parsing XML")
+ var registry struct {
+  Records []struct {
+   Protocol string `xml:"protocol"`
+   Number string `xml:"number"`
+   Name string `xml:"name"`
+  } `xml:"record"`
+ }
+ // Fail loudly on malformed XML instead of silently emitting empty maps
+ // (the error was previously discarded).
+ if err := xml.Unmarshal(body, &registry); err != nil {
+  panic(err)
+ }
+ var tcpPorts bytes.Buffer
+ var udpPorts bytes.Buffer
+ var sctpPorts bytes.Buffer
+ // done tracks (protocol, port) pairs already written: the registry lists
+ // a port once per service name and only the first name is kept.
+ done := map[string]map[int]bool{
+  "tcp": map[int]bool{},
+  "udp": map[int]bool{},
+  "sctp": map[int]bool{},
+ }
+ for _, r := range registry.Records {
+  port, err := strconv.Atoi(r.Number)
+  if err != nil {
+   // Skip port ranges ("100-200") and records without a plain number.
+   continue
+  }
+  if r.Name == "" {
+   continue
+  }
+  var b *bytes.Buffer
+  switch r.Protocol {
+  case "tcp":
+   b = &tcpPorts
+  case "udp":
+   b = &udpPorts
+  case "sctp":
+   b = &sctpPorts
+  default:
+   continue
+  }
+  if done[r.Protocol][port] {
+   continue
+  }
+  done[r.Protocol][port] = true
+  fmt.Fprintf(b, "\t%d: %q,\n", port, r.Name)
+ }
+ fmt.Fprintln(os.Stderr, "Writing results to stdout")
+ fmt.Printf(fmtString, time.Now(), *url, tcpPorts.String(), udpPorts.String(), sctpPorts.String())
+}
diff --git a/vendor/github.com/google/gopacket/layers/gen2.go b/vendor/github.com/google/gopacket/layers/gen2.go
new file mode 100644
index 0000000..150cad7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen2.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This binary handles creating string constants and function templates for enums.
+//
+// go run gen2.go | gofmt > enums_generated.go
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "text/template"
+ "time"
+)
+
+// fmtString is the fixed header of the generated enums_generated.go file;
+// the single %s verb receives the generation timestamp.
+const fmtString = `// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at %s
+
+import (
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+`
+
+// funcsTmpl is executed once per enum type. It emits the type's Decode,
+// String and LayerType methods, a per-value error decoder, the type's
+// metadata array, and the init helper that pre-fills that array with
+// "unknown" entries.
+var funcsTmpl = template.Must(template.New("foo").Parse(`
+// Decode calls {{.Name}}Metadata.DecodeWith's decoder.
+func (a {{.Name}}) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return {{.Name}}Metadata[a].DecodeWith.Decode(data, p)
+}
+// String returns {{.Name}}Metadata.Name.
+func (a {{.Name}}) String() string {
+ return {{.Name}}Metadata[a].Name
+}
+// LayerType returns {{.Name}}Metadata.LayerType.
+func (a {{.Name}}) LayerType() gopacket.LayerType {
+ return {{.Name}}Metadata[a].LayerType
+}
+
+type errorDecoderFor{{.Name}} int
+func (a *errorDecoderFor{{.Name}}) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderFor{{.Name}}) Error() string {
+ return fmt.Sprintf("Unable to decode {{.Name}} %d", int(*a))
+}
+
+var errorDecodersFor{{.Name}} [{{.Num}}]errorDecoderFor{{.Name}}
+var {{.Name}}Metadata [{{.Num}}]EnumMetadata
+
+func initUnknownTypesFor{{.Name}}() {
+ for i := 0; i < {{.Num}}; i++ {
+  errorDecodersFor{{.Name}}[i] = errorDecoderFor{{.Name}}(i)
+  {{.Name}}Metadata[i] = EnumMetadata{
+   DecodeWith: &errorDecodersFor{{.Name}}[i],
+   Name: "Unknown{{.Name}}",
+  }
+ }
+}
+`))
+
+// main writes the complete enums_generated.go source to stdout: the fixed
+// file header, an init function registering the unknown-type decoders, and
+// one funcsTmpl instantiation per enum type. Pipe the result through gofmt.
+func main() {
+ fmt.Fprintf(os.Stderr, "Writing results to stdout\n")
+ fmt.Printf(fmtString, time.Now())
+ // Name is the enum type; Num is its value-space size (the generated
+ // metadata array length: one byte -> 256, two bytes -> 65536).
+ types := []struct {
+  Name string
+  Num int
+ }{
+  {"LinkType", 256},
+  {"EthernetType", 65536},
+  {"PPPType", 65536},
+  {"IPProtocol", 256},
+  {"SCTPChunkType", 256},
+  {"PPPoECode", 256},
+  {"FDDIFrameControl", 256},
+  {"EAPOLType", 256},
+  {"ProtocolFamily", 256},
+  {"Dot11Type", 256},
+  {"USBTransportType", 256},
+ }
+
+ fmt.Println("func init() {")
+ for _, t := range types {
+  fmt.Printf("initUnknownTypesFor%s()\n", t.Name)
+ }
+ fmt.Println("initActualTypeData()")
+ fmt.Println("}")
+ for _, t := range types {
+  if err := funcsTmpl.Execute(os.Stdout, t); err != nil {
+   log.Fatalf("Failed to execute template %s: %v", t.Name, err)
+  }
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/google/gopacket/layers/gen_linted.sh
new file mode 100644
index 0000000..75c701f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen_linted.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+# Record in .linted the name of every Go file in this directory for which
+# golint produces no output (i.e. the file is lint-clean).
+for i in *.go; do golint $i | grep -q . || echo $i; done > .linted
diff --git a/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/google/gopacket/layers/geneve.go
new file mode 100644
index 0000000..72fe7c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/geneve.go
@@ -0,0 +1,110 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// Geneve is specified here https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03
+// Geneve Header:
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Virtual Network Identifier (VNI) | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Variable Length Options |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type Geneve struct {
+ BaseLayer
+ Version uint8 // 2 bits
+ OptionsLength uint8 // 6 bits on the wire; stored here as a byte count (wire value * 4)
+ OAMPacket bool // 1 bit (O)
+ CriticalOption bool // 1 bit (C)
+ Protocol EthernetType // 16 bits
+ VNI uint32 // 24 bits, stored in the low bits
+ Options []*GeneveOption
+}
+
+// GeneveOption is a single variable-length Geneve tunnel option.
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Option Class | Type |R|R|R| Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Variable Option Data |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type GeneveOption struct {
+ Class uint16 // 16 bits
+ Type uint8 // 8 bits
+ Flags uint8 // 3 bits
+ Length uint8 // stored as total size in bytes, including the 4-byte option header
+ Data []byte
+}
+
+// LayerType returns LayerTypeGeneve.
+func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve }
+
+// decodeGeneveOption parses one option starting at data[0] and returns it
+// together with its total encoded size in bytes (4-byte header + data).
+//
+// NOTE(review): the gn parameter is unused, and there is no bounds check —
+// callers must guarantee data holds the complete option.
+// NOTE(review): the diagram gives Length 5 bits, but only 4 are masked
+// here — confirm against the Geneve draft.
+func decodeGeneveOption(data []byte, gn *Geneve) (*GeneveOption, uint8) {
+ opt := &GeneveOption{}
+
+ opt.Class = binary.BigEndian.Uint16(data[0:2])
+ opt.Type = data[2]
+ opt.Flags = data[3] >> 4
+ // The wire Length counts 4-byte units of option data; opt.Length stores
+ // the total size including the 4-byte option header.
+ opt.Length = (data[3]&0xf)*4 + 4
+
+ opt.Data = make([]byte, opt.Length-4)
+ copy(opt.Data, data[4:opt.Length])
+
+ return opt, opt.Length
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // The fixed Geneve header is 8 bytes (bytes 0-7); options follow it.
+ if len(data) < 8 {
+  df.SetTruncated()
+  return errors.New("geneve packet too short")
+ }
+
+ // Ver is the top 2 bits of byte 0 (the struct declares it as 2 bits; the
+ // previous >>7 dropped one). Opt Len is the low 6 bits, in 4-byte units.
+ gn.Version = data[0] >> 6
+ gn.OptionsLength = (data[0] & 0x3f) * 4
+
+ gn.OAMPacket = data[1]&0x80 > 0
+ gn.CriticalOption = data[1]&0x40 > 0
+ gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+
+ // VNI is 24 bits stored big-endian in bytes 4-6; byte 7 is reserved.
+ var buf [4]byte
+ copy(buf[1:], data[4:7])
+ gn.VNI = binary.BigEndian.Uint32(buf[:])
+
+ // offset is an int (not uint8) so it cannot wrap when OptionsLength
+ // (up to 252) is added to the 8-byte header.
+ offset, length := 8, int(gn.OptionsLength)
+ if len(data) < length+8 {
+  df.SetTruncated()
+  return errors.New("geneve packet too short")
+ }
+
+ for length > 0 {
+  // optLen renamed from "len", which shadowed the builtin.
+  opt, optLen := decodeGeneveOption(data[offset:], gn)
+  gn.Options = append(gn.Options, opt)
+
+  length -= int(optLen)
+  offset += int(optLen)
+ }
+
+ gn.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+
+ return nil
+}
+
+// NextLayerType returns the layer type selected by the encapsulated
+// Protocol (EthernetType) field.
+func (gn *Geneve) NextLayerType() gopacket.LayerType {
+ return gn.Protocol.LayerType()
+}
+
+// decodeGeneve is the gopacket decoder hook for Geneve; it delegates to
+// DecodeFromBytes via the generic decodingLayerDecoder helper.
+func decodeGeneve(data []byte, p gopacket.PacketBuilder) error {
+ gn := &Geneve{}
+ return decodingLayerDecoder(gn, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gre.go b/vendor/github.com/google/gopacket/layers/gre.go
new file mode 100644
index 0000000..9c5e7d2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gre.go
@@ -0,0 +1,200 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+
+ "github.com/google/gopacket"
+)
+
// GRE is a Generic Routing Encapsulation header.
type GRE struct {
	BaseLayer
	// Presence/flag bits decoded from the first two header bytes.
	ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool
	RecursionControl, Flags, Version uint8
	// Protocol is the EtherType of the encapsulated payload.
	Protocol EthernetType
	// Checksum/Offset, Key, Seq and Ack are only meaningful when the
	// corresponding *Present flag above is set.
	Checksum, Offset uint16
	Key, Seq, Ack    uint32
	// Embedded head of the source-route entry (SRE) list; nil unless
	// RoutingPresent is set.
	*GRERouting
}
+
// GRERouting is GRE routing information, present if the RoutingPresent flag is
// set.
type GRERouting struct {
	AddressFamily        uint16 // identifies the syntax of RoutingInformation
	SREOffset, SRELength uint8  // offset into / byte length of this entry's routing data
	RoutingInformation   []byte // raw SRE payload, SRELength bytes
	Next                 *GRERouting // next source-route entry; nil at end of list
}
+
// LayerType returns gopacket.LayerTypeGRE, identifying this layer to gopacket.
func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE }
+
// DecodeFromBytes decodes the given bytes into this layer.
//
// Layout (RFC 1701/2784/2890): byte 0 carries the C/R/K/S/s presence flags
// and the 3-bit recursion control; byte 1 carries the A flag, 4 flag bits
// and the 3-bit version; bytes 2-3 hold the payload EtherType. Optional
// 32-bit fields then follow in order — checksum+offset, key, sequence
// number, the SRE routing list, acknowledgment — each gated by its flag.
//
// NOTE(review): no bounds checking is performed here; input shorter than
// the fields implied by its own flag bits will panic on the slice
// expressions below. Callers must ensure data is long enough.
func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	g.ChecksumPresent = data[0]&0x80 != 0
	g.RoutingPresent = data[0]&0x40 != 0
	g.KeyPresent = data[0]&0x20 != 0
	g.SeqPresent = data[0]&0x10 != 0
	g.StrictSourceRoute = data[0]&0x08 != 0
	g.AckPresent = data[1]&0x80 != 0
	g.RecursionControl = data[0] & 0x7
	g.Flags = data[1] >> 3
	g.Version = data[1] & 0x7
	g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
	offset := 4
	if g.ChecksumPresent || g.RoutingPresent {
		// Checksum and Offset share a single optional word, present when
		// either flag is set.
		g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2])
		g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4])
		offset += 4
	}
	if g.KeyPresent {
		g.Key = binary.BigEndian.Uint32(data[offset : offset+4])
		offset += 4
	}
	if g.SeqPresent {
		g.Seq = binary.BigEndian.Uint32(data[offset : offset+4])
		offset += 4
	}
	if g.RoutingPresent {
		// Build the SRE linked list. The on-wire list ends with a "NULL"
		// SRE (zero address family and length), which is consumed but not
		// appended to the list.
		tail := &g.GRERouting
		for {
			sre := &GRERouting{
				AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]),
				SREOffset:     data[offset+2],
				SRELength:     data[offset+3],
			}
			sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)]
			offset += 4 + int(sre.SRELength)
			if sre.AddressFamily == 0 && sre.SRELength == 0 {
				break
			}
			(*tail) = sre
			tail = &sre.Next
		}
	}
	if g.AckPresent {
		g.Ack = binary.BigEndian.Uint32(data[offset : offset+4])
		offset += 4
	}
	g.BaseLayer = BaseLayer{data[:offset], data[offset:]}
	return nil
}
+
+// SerializeTo writes the serialized form of this layer into the SerializationBuffer,
+// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info.
+func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ size := 4
+ if g.ChecksumPresent || g.RoutingPresent {
+ size += 4
+ }
+ if g.KeyPresent {
+ size += 4
+ }
+ if g.SeqPresent {
+ size += 4
+ }
+ if g.RoutingPresent {
+ r := g.GRERouting
+ for r != nil {
+ size += 4 + int(r.SRELength)
+ r = r.Next
+ }
+ size += 4
+ }
+ if g.AckPresent {
+ size += 4
+ }
+ buf, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ // Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags.
+ buf[0] = 0
+ buf[1] = 0
+ if g.ChecksumPresent {
+ buf[0] |= 0x80
+ }
+ if g.RoutingPresent {
+ buf[0] |= 0x40
+ }
+ if g.KeyPresent {
+ buf[0] |= 0x20
+ }
+ if g.SeqPresent {
+ buf[0] |= 0x10
+ }
+ if g.StrictSourceRoute {
+ buf[0] |= 0x08
+ }
+ if g.AckPresent {
+ buf[1] |= 0x80
+ }
+ buf[0] |= g.RecursionControl
+ buf[1] |= g.Flags << 3
+ buf[1] |= g.Version
+ binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol))
+ offset := 4
+ if g.ChecksumPresent || g.RoutingPresent {
+ // Don't write the checksum value yet, as we may need to compute it,
+ // which requires the entire header be complete.
+ // Instead we zeroize the memory in case it is dirty.
+ buf[offset] = 0
+ buf[offset+1] = 0
+ binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset)
+ offset += 4
+ }
+ if g.KeyPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key)
+ offset += 4
+ }
+ if g.SeqPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq)
+ offset += 4
+ }
+ if g.RoutingPresent {
+ sre := g.GRERouting
+ for sre != nil {
+ binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily)
+ buf[offset+2] = sre.SREOffset
+ buf[offset+3] = sre.SRELength
+ copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation)
+ offset += 4 + int(sre.SRELength)
+ sre = sre.Next
+ }
+ // Terminate routing field with a "NULL" SRE.
+ binary.BigEndian.PutUint32(buf[offset:offset+4], 0)
+ }
+ if g.AckPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack)
+ offset += 4
+ }
+ if g.ChecksumPresent {
+ if opts.ComputeChecksums {
+ g.Checksum = tcpipChecksum(b.Bytes(), 0)
+ }
+
+ binary.BigEndian.PutUint16(buf[4:6], g.Checksum)
+ }
+ return nil
+}
+
// CanDecode returns the set of layer types that this DecodingLayer can decode:
// exactly LayerTypeGRE.
func (g *GRE) CanDecode() gopacket.LayerClass {
	return LayerTypeGRE
}
+
// NextLayerType returns the layer type contained by this DecodingLayer,
// derived from the Protocol (EtherType) field of the GRE header.
func (g *GRE) NextLayerType() gopacket.LayerType {
	return g.Protocol.LayerType()
}
+
// decodeGRE is the gopacket decoder hook for GRE: it allocates a fresh layer
// and hands it to the generic DecodingLayer plumbing.
func decodeGRE(data []byte, p gopacket.PacketBuilder) error {
	g := &GRE{}
	return decodingLayerDecoder(g, data, p)
}
diff --git a/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/google/gopacket/layers/gtp.go
new file mode 100644
index 0000000..0ec8a6a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gtp.go
@@ -0,0 +1,181 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+const gtpMinimumSizeInBytes int = 8
+
// GTPExtensionHeader is used to carry extra data and enable future extensions of the GTP without the need to use another version number.
type GTPExtensionHeader struct {
	Type    uint8  // extension header type tag (the "next extension header type" byte preceding this header on the wire)
	Content []byte // raw content between the length byte and the next-type byte
}
+
// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces.
// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595
type GTPv1U struct {
	BaseLayer
	Version             uint8  // 3-bit GTP version from byte 0
	ProtocolType        uint8  // PT bit: 1 = GTP, 0 = GTP'
	Reserved            uint8  // reserved bit from byte 0
	ExtensionHeaderFlag bool   // E bit: extension headers follow the mandatory header
	SequenceNumberFlag  bool   // S bit: SequenceNumber field is valid
	NPDUFlag            bool   // PN bit: NPDU field is valid
	MessageType         uint8
	MessageLength       uint16 // length of everything after the mandatory 8-byte header
	TEID                uint32 // tunnel endpoint ID multiplexing connections in one tunnel
	SequenceNumber      uint16 // valid only when SequenceNumberFlag is set
	NPDU                uint8  // valid only when NPDUFlag is set
	GTPExtensionHeaders []GTPExtensionHeader
}
+
// LayerType returns LayerTypeGTPv1U, identifying this layer to gopacket.
func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U }
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet
+func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ hLen := gtpMinimumSizeInBytes
+ dLen := len(data)
+ if dLen < hLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ g.Version = (data[0] >> 5) & 0x07
+ g.ProtocolType = (data[0] >> 4) & 0x01
+ g.Reserved = (data[0] >> 3) & 0x01
+ g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1
+ g.NPDUFlag = (data[0] & 0x01) == 1
+ g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1
+ g.MessageType = data[1]
+ g.MessageLength = binary.BigEndian.Uint16(data[2:4])
+ pLen := 8 + g.MessageLength
+ if uint16(dLen) < pLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ // Field used to multiplex different connections in the same GTP tunnel.
+ g.TEID = binary.BigEndian.Uint32(data[4:8])
+ cIndex := uint16(hLen)
+ if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag {
+ hLen += 4
+ cIndex += 4
+ if dLen < hLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ if g.SequenceNumberFlag {
+ g.SequenceNumber = binary.BigEndian.Uint16(data[8:10])
+ }
+ if g.NPDUFlag {
+ g.NPDU = data[10]
+ }
+ if g.ExtensionHeaderFlag {
+ extensionFlag := true
+ for extensionFlag {
+ extensionType := uint8(data[cIndex-1])
+ extensionLength := uint(data[cIndex])
+ if extensionLength == 0 {
+ return fmt.Errorf("GTP packet with invalid extension header")
+ }
+ // extensionLength is in 4-octet units
+ lIndex := cIndex + (uint16(extensionLength) * 4)
+ if uint16(dLen) < lIndex {
+ fmt.Println(dLen, lIndex)
+ return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen)
+ }
+ content := data[cIndex+1 : lIndex-1]
+ eh := GTPExtensionHeader{Type: extensionType, Content: content}
+ g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh)
+ cIndex = lIndex
+ // Check if coming bytes are from an extension header
+ extensionFlag = data[cIndex-1] != 0
+
+ }
+ }
+ }
+ g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]}
+ return nil
+
+}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
//
// NOTE(review): the fixed header bytes are OR-ed into prepended memory that
// is never zeroed first, so a dirty SerializeBuffer can corrupt the output.
// MessageLength is written as-is (opts.FixLengths is not honored — TODO
// confirm against gopacket conventions). Extension-header serialization
// back-patches each header's Type into the last byte of the previously
// appended chunk, which assumes AppendBytes returns stable, contiguous
// memory for earlier chunks — verify against the SerializeBuffer contract.
func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	data, err := b.PrependBytes(gtpMinimumSizeInBytes)
	if err != nil {
		return err
	}
	data[0] |= (g.Version << 5)
	// PT bit: always marks the packet as GTP (as opposed to GTP').
	data[0] |= (1 << 4)
	if len(g.GTPExtensionHeaders) > 0 {
		// Presence of extension headers implies the E flag; note this also
		// mutates the receiver as a side effect.
		data[0] |= 0x04
		g.ExtensionHeaderFlag = true
	}
	if g.SequenceNumberFlag {
		data[0] |= 0x02
	}
	if g.NPDUFlag {
		data[0] |= 0x01
	}
	data[1] = g.MessageType
	binary.BigEndian.PutUint16(data[2:4], g.MessageLength)
	binary.BigEndian.PutUint32(data[4:8], g.TEID)
	if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag {
		// Optional 4-byte block: sequence number (2), N-PDU (1), and the
		// first extension-header type byte (filled in by the loop below).
		data, err := b.AppendBytes(4)
		if err != nil {
			return err
		}
		binary.BigEndian.PutUint16(data[:2], g.SequenceNumber)
		data[2] = g.NPDU
		for _, eh := range g.GTPExtensionHeaders {
			// Write this header's type into the trailing byte of the
			// previous chunk.
			data[len(data)-1] = eh.Type
			lContent := len(eh.Content)
			// extensionLength is in 4-octet units
			extensionLength := (lContent + 2) / 4
			// Get two extra byte for the next extension header type and length
			data, err = b.AppendBytes(lContent + 2)
			if err != nil {
				return err
			}
			data[0] = byte(extensionLength)
			copy(data[1:lContent+1], eh.Content)
		}
	}
	return nil
}
+
// CanDecode returns a set of layers that GTP objects can decode: exactly
// LayerTypeGTPv1U.
func (g *GTPv1U) CanDecode() gopacket.LayerClass {
	return LayerTypeGTPv1U
}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+func (g *GTPv1U) NextLayerType() gopacket.LayerType {
+ version := uint8(g.LayerPayload()[0]) >> 4
+ if version == 4 {
+ return LayerTypeIPv4
+ } else if version == 6 {
+ return LayerTypeIPv6
+ } else {
+ return LayerTypePPP
+ }
+}
+
// decodeGTPv1u is the gopacket decoder hook for GTPv1-U. Unlike most layers
// it decodes eagerly (not via decodingLayerDecoder) so the next decoder can
// be chosen from the already-parsed payload.
func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error {
	gtp := &GTPv1U{}
	err := gtp.DecodeFromBytes(data, p)
	if err != nil {
		return err
	}
	p.AddLayer(gtp)
	return p.NextDecoder(gtp.NextLayerType())
}
diff --git a/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/google/gopacket/layers/iana_ports.go
new file mode 100644
index 0000000..ddcf3ec
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/iana_ports.go
@@ -0,0 +1,11351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290
+// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml"
+
// TCPPortNames contains the port names for all TCP ports.
// It aliases the generated, unexported tcpPortNames map below.
var TCPPortNames = tcpPortNames

// UDPPortNames contains the port names for all UDP ports.
// It aliases the generated, unexported udpPortNames map.
var UDPPortNames = udpPortNames

// SCTPPortNames contains the port names for all SCTP ports.
// It aliases the generated, unexported sctpPortNames map.
var SCTPPortNames = sctpPortNames
+
+var tcpPortNames = map[TCPPort]string{
+ 1: "tcpmux",
+ 2: "compressnet",
+ 3: "compressnet",
+ 5: "rje",
+ 7: "echo",
+ 9: "discard",
+ 11: "systat",
+ 13: "daytime",
+ 17: "qotd",
+ 18: "msp",
+ 19: "chargen",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 23: "telnet",
+ 25: "smtp",
+ 27: "nsw-fe",
+ 29: "msg-icp",
+ 31: "msg-auth",
+ 33: "dsp",
+ 37: "time",
+ 38: "rap",
+ 39: "rlp",
+ 41: "graphics",
+ 42: "name",
+ 43: "nicname",
+ 44: "mpm-flags",
+ 45: "mpm",
+ 46: "mpm-snd",
+ 48: "auditd",
+ 49: "tacacs",
+ 50: "re-mail-ck",
+ 52: "xns-time",
+ 53: "domain",
+ 54: "xns-ch",
+ 55: "isi-gl",
+ 56: "xns-auth",
+ 58: "xns-mail",
+ 62: "acas",
+ 63: "whoispp",
+ 64: "covia",
+ 65: "tacacs-ds",
+ 66: "sql-net",
+ 67: "bootps",
+ 68: "bootpc",
+ 69: "tftp",
+ 70: "gopher",
+ 71: "netrjs-1",
+ 72: "netrjs-2",
+ 73: "netrjs-3",
+ 74: "netrjs-4",
+ 76: "deos",
+ 78: "vettcp",
+ 79: "finger",
+ 80: "http",
+ 82: "xfer",
+ 83: "mit-ml-dev",
+ 84: "ctf",
+ 85: "mit-ml-dev",
+ 86: "mfcobol",
+ 88: "kerberos",
+ 89: "su-mit-tg",
+ 90: "dnsix",
+ 91: "mit-dov",
+ 92: "npp",
+ 93: "dcp",
+ 94: "objcall",
+ 95: "supdup",
+ 96: "dixie",
+ 97: "swift-rvf",
+ 98: "tacnews",
+ 99: "metagram",
+ 101: "hostname",
+ 102: "iso-tsap",
+ 103: "gppitnp",
+ 104: "acr-nema",
+ 105: "cso",
+ 106: "3com-tsmux",
+ 107: "rtelnet",
+ 108: "snagas",
+ 109: "pop2",
+ 110: "pop3",
+ 111: "sunrpc",
+ 112: "mcidas",
+ 113: "ident",
+ 115: "sftp",
+ 116: "ansanotify",
+ 117: "uucp-path",
+ 118: "sqlserv",
+ 119: "nntp",
+ 120: "cfdptkt",
+ 121: "erpc",
+ 122: "smakynet",
+ 123: "ntp",
+ 124: "ansatrader",
+ 125: "locus-map",
+ 126: "nxedit",
+ 127: "locus-con",
+ 128: "gss-xlicen",
+ 129: "pwdgen",
+ 130: "cisco-fna",
+ 131: "cisco-tna",
+ 132: "cisco-sys",
+ 133: "statsrv",
+ 134: "ingres-net",
+ 135: "epmap",
+ 136: "profile",
+ 137: "netbios-ns",
+ 138: "netbios-dgm",
+ 139: "netbios-ssn",
+ 140: "emfis-data",
+ 141: "emfis-cntl",
+ 142: "bl-idm",
+ 143: "imap",
+ 144: "uma",
+ 145: "uaac",
+ 146: "iso-tp0",
+ 147: "iso-ip",
+ 148: "jargon",
+ 149: "aed-512",
+ 150: "sql-net",
+ 151: "hems",
+ 152: "bftp",
+ 153: "sgmp",
+ 154: "netsc-prod",
+ 155: "netsc-dev",
+ 156: "sqlsrv",
+ 157: "knet-cmp",
+ 158: "pcmail-srv",
+ 159: "nss-routing",
+ 160: "sgmp-traps",
+ 161: "snmp",
+ 162: "snmptrap",
+ 163: "cmip-man",
+ 164: "cmip-agent",
+ 165: "xns-courier",
+ 166: "s-net",
+ 167: "namp",
+ 168: "rsvd",
+ 169: "send",
+ 170: "print-srv",
+ 171: "multiplex",
+ 172: "cl-1",
+ 173: "xyplex-mux",
+ 174: "mailq",
+ 175: "vmnet",
+ 176: "genrad-mux",
+ 177: "xdmcp",
+ 178: "nextstep",
+ 179: "bgp",
+ 180: "ris",
+ 181: "unify",
+ 182: "audit",
+ 183: "ocbinder",
+ 184: "ocserver",
+ 185: "remote-kis",
+ 186: "kis",
+ 187: "aci",
+ 188: "mumps",
+ 189: "qft",
+ 190: "gacp",
+ 191: "prospero",
+ 192: "osu-nms",
+ 193: "srmp",
+ 194: "irc",
+ 195: "dn6-nlm-aud",
+ 196: "dn6-smm-red",
+ 197: "dls",
+ 198: "dls-mon",
+ 199: "smux",
+ 200: "src",
+ 201: "at-rtmp",
+ 202: "at-nbp",
+ 203: "at-3",
+ 204: "at-echo",
+ 205: "at-5",
+ 206: "at-zis",
+ 207: "at-7",
+ 208: "at-8",
+ 209: "qmtp",
+ 210: "z39-50",
+ 211: "914c-g",
+ 212: "anet",
+ 213: "ipx",
+ 214: "vmpwscs",
+ 215: "softpc",
+ 216: "CAIlic",
+ 217: "dbase",
+ 218: "mpp",
+ 219: "uarps",
+ 220: "imap3",
+ 221: "fln-spx",
+ 222: "rsh-spx",
+ 223: "cdc",
+ 224: "masqdialer",
+ 242: "direct",
+ 243: "sur-meas",
+ 244: "inbusiness",
+ 245: "link",
+ 246: "dsp3270",
+ 247: "subntbcst-tftp",
+ 248: "bhfhs",
+ 256: "rap",
+ 257: "set",
+ 259: "esro-gen",
+ 260: "openport",
+ 261: "nsiiops",
+ 262: "arcisdms",
+ 263: "hdap",
+ 264: "bgmp",
+ 265: "x-bone-ctl",
+ 266: "sst",
+ 267: "td-service",
+ 268: "td-replica",
+ 269: "manet",
+ 271: "pt-tls",
+ 280: "http-mgmt",
+ 281: "personal-link",
+ 282: "cableport-ax",
+ 283: "rescap",
+ 284: "corerjd",
+ 286: "fxp",
+ 287: "k-block",
+ 308: "novastorbakcup",
+ 309: "entrusttime",
+ 310: "bhmds",
+ 311: "asip-webadmin",
+ 312: "vslmp",
+ 313: "magenta-logic",
+ 314: "opalis-robot",
+ 315: "dpsi",
+ 316: "decauth",
+ 317: "zannet",
+ 318: "pkix-timestamp",
+ 319: "ptp-event",
+ 320: "ptp-general",
+ 321: "pip",
+ 322: "rtsps",
+ 323: "rpki-rtr",
+ 324: "rpki-rtr-tls",
+ 333: "texar",
+ 344: "pdap",
+ 345: "pawserv",
+ 346: "zserv",
+ 347: "fatserv",
+ 348: "csi-sgwp",
+ 349: "mftp",
+ 350: "matip-type-a",
+ 351: "matip-type-b",
+ 352: "dtag-ste-sb",
+ 353: "ndsauth",
+ 354: "bh611",
+ 355: "datex-asn",
+ 356: "cloanto-net-1",
+ 357: "bhevent",
+ 358: "shrinkwrap",
+ 359: "nsrmp",
+ 360: "scoi2odialog",
+ 361: "semantix",
+ 362: "srssend",
+ 363: "rsvp-tunnel",
+ 364: "aurora-cmgr",
+ 365: "dtk",
+ 366: "odmr",
+ 367: "mortgageware",
+ 368: "qbikgdp",
+ 369: "rpc2portmap",
+ 370: "codaauth2",
+ 371: "clearcase",
+ 372: "ulistproc",
+ 373: "legent-1",
+ 374: "legent-2",
+ 375: "hassle",
+ 376: "nip",
+ 377: "tnETOS",
+ 378: "dsETOS",
+ 379: "is99c",
+ 380: "is99s",
+ 381: "hp-collector",
+ 382: "hp-managed-node",
+ 383: "hp-alarm-mgr",
+ 384: "arns",
+ 385: "ibm-app",
+ 386: "asa",
+ 387: "aurp",
+ 388: "unidata-ldm",
+ 389: "ldap",
+ 390: "uis",
+ 391: "synotics-relay",
+ 392: "synotics-broker",
+ 393: "meta5",
+ 394: "embl-ndt",
+ 395: "netcp",
+ 396: "netware-ip",
+ 397: "mptn",
+ 398: "kryptolan",
+ 399: "iso-tsap-c2",
+ 400: "osb-sd",
+ 401: "ups",
+ 402: "genie",
+ 403: "decap",
+ 404: "nced",
+ 405: "ncld",
+ 406: "imsp",
+ 407: "timbuktu",
+ 408: "prm-sm",
+ 409: "prm-nm",
+ 410: "decladebug",
+ 411: "rmt",
+ 412: "synoptics-trap",
+ 413: "smsp",
+ 414: "infoseek",
+ 415: "bnet",
+ 416: "silverplatter",
+ 417: "onmux",
+ 418: "hyper-g",
+ 419: "ariel1",
+ 420: "smpte",
+ 421: "ariel2",
+ 422: "ariel3",
+ 423: "opc-job-start",
+ 424: "opc-job-track",
+ 425: "icad-el",
+ 426: "smartsdp",
+ 427: "svrloc",
+ 428: "ocs-cmu",
+ 429: "ocs-amu",
+ 430: "utmpsd",
+ 431: "utmpcd",
+ 432: "iasd",
+ 433: "nnsp",
+ 434: "mobileip-agent",
+ 435: "mobilip-mn",
+ 436: "dna-cml",
+ 437: "comscm",
+ 438: "dsfgw",
+ 439: "dasp",
+ 440: "sgcp",
+ 441: "decvms-sysmgt",
+ 442: "cvc-hostd",
+ 443: "https",
+ 444: "snpp",
+ 445: "microsoft-ds",
+ 446: "ddm-rdb",
+ 447: "ddm-dfm",
+ 448: "ddm-ssl",
+ 449: "as-servermap",
+ 450: "tserver",
+ 451: "sfs-smp-net",
+ 452: "sfs-config",
+ 453: "creativeserver",
+ 454: "contentserver",
+ 455: "creativepartnr",
+ 456: "macon-tcp",
+ 457: "scohelp",
+ 458: "appleqtc",
+ 459: "ampr-rcmd",
+ 460: "skronk",
+ 461: "datasurfsrv",
+ 462: "datasurfsrvsec",
+ 463: "alpes",
+ 464: "kpasswd",
+ 465: "urd",
+ 466: "digital-vrc",
+ 467: "mylex-mapd",
+ 468: "photuris",
+ 469: "rcp",
+ 470: "scx-proxy",
+ 471: "mondex",
+ 472: "ljk-login",
+ 473: "hybrid-pop",
+ 474: "tn-tl-w1",
+ 475: "tcpnethaspsrv",
+ 476: "tn-tl-fd1",
+ 477: "ss7ns",
+ 478: "spsc",
+ 479: "iafserver",
+ 480: "iafdbase",
+ 481: "ph",
+ 482: "bgs-nsi",
+ 483: "ulpnet",
+ 484: "integra-sme",
+ 485: "powerburst",
+ 486: "avian",
+ 487: "saft",
+ 488: "gss-http",
+ 489: "nest-protocol",
+ 490: "micom-pfs",
+ 491: "go-login",
+ 492: "ticf-1",
+ 493: "ticf-2",
+ 494: "pov-ray",
+ 495: "intecourier",
+ 496: "pim-rp-disc",
+ 497: "retrospect",
+ 498: "siam",
+ 499: "iso-ill",
+ 500: "isakmp",
+ 501: "stmf",
+ 502: "mbap",
+ 503: "intrinsa",
+ 504: "citadel",
+ 505: "mailbox-lm",
+ 506: "ohimsrv",
+ 507: "crs",
+ 508: "xvttp",
+ 509: "snare",
+ 510: "fcp",
+ 511: "passgo",
+ 512: "exec",
+ 513: "login",
+ 514: "shell",
+ 515: "printer",
+ 516: "videotex",
+ 517: "talk",
+ 518: "ntalk",
+ 519: "utime",
+ 520: "efs",
+ 521: "ripng",
+ 522: "ulp",
+ 523: "ibm-db2",
+ 524: "ncp",
+ 525: "timed",
+ 526: "tempo",
+ 527: "stx",
+ 528: "custix",
+ 529: "irc-serv",
+ 530: "courier",
+ 531: "conference",
+ 532: "netnews",
+ 533: "netwall",
+ 534: "windream",
+ 535: "iiop",
+ 536: "opalis-rdv",
+ 537: "nmsp",
+ 538: "gdomap",
+ 539: "apertus-ldp",
+ 540: "uucp",
+ 541: "uucp-rlogin",
+ 542: "commerce",
+ 543: "klogin",
+ 544: "kshell",
+ 545: "appleqtcsrvr",
+ 546: "dhcpv6-client",
+ 547: "dhcpv6-server",
+ 548: "afpovertcp",
+ 549: "idfp",
+ 550: "new-rwho",
+ 551: "cybercash",
+ 552: "devshr-nts",
+ 553: "pirp",
+ 554: "rtsp",
+ 555: "dsf",
+ 556: "remotefs",
+ 557: "openvms-sysipc",
+ 558: "sdnskmp",
+ 559: "teedtap",
+ 560: "rmonitor",
+ 561: "monitor",
+ 562: "chshell",
+ 563: "nntps",
+ 564: "9pfs",
+ 565: "whoami",
+ 566: "streettalk",
+ 567: "banyan-rpc",
+ 568: "ms-shuttle",
+ 569: "ms-rome",
+ 570: "meter",
+ 571: "meter",
+ 572: "sonar",
+ 573: "banyan-vip",
+ 574: "ftp-agent",
+ 575: "vemmi",
+ 576: "ipcd",
+ 577: "vnas",
+ 578: "ipdd",
+ 579: "decbsrv",
+ 580: "sntp-heartbeat",
+ 581: "bdp",
+ 582: "scc-security",
+ 583: "philips-vc",
+ 584: "keyserver",
+ 586: "password-chg",
+ 587: "submission",
+ 588: "cal",
+ 589: "eyelink",
+ 590: "tns-cml",
+ 591: "http-alt",
+ 592: "eudora-set",
+ 593: "http-rpc-epmap",
+ 594: "tpip",
+ 595: "cab-protocol",
+ 596: "smsd",
+ 597: "ptcnameservice",
+ 598: "sco-websrvrmg3",
+ 599: "acp",
+ 600: "ipcserver",
+ 601: "syslog-conn",
+ 602: "xmlrpc-beep",
+ 603: "idxp",
+ 604: "tunnel",
+ 605: "soap-beep",
+ 606: "urm",
+ 607: "nqs",
+ 608: "sift-uft",
+ 609: "npmp-trap",
+ 610: "npmp-local",
+ 611: "npmp-gui",
+ 612: "hmmp-ind",
+ 613: "hmmp-op",
+ 614: "sshell",
+ 615: "sco-inetmgr",
+ 616: "sco-sysmgr",
+ 617: "sco-dtmgr",
+ 618: "dei-icda",
+ 619: "compaq-evm",
+ 620: "sco-websrvrmgr",
+ 621: "escp-ip",
+ 622: "collaborator",
+ 623: "oob-ws-http",
+ 624: "cryptoadmin",
+ 625: "dec-dlm",
+ 626: "asia",
+ 627: "passgo-tivoli",
+ 628: "qmqp",
+ 629: "3com-amp3",
+ 630: "rda",
+ 631: "ipp",
+ 632: "bmpp",
+ 633: "servstat",
+ 634: "ginad",
+ 635: "rlzdbase",
+ 636: "ldaps",
+ 637: "lanserver",
+ 638: "mcns-sec",
+ 639: "msdp",
+ 640: "entrust-sps",
+ 641: "repcmd",
+ 642: "esro-emsdp",
+ 643: "sanity",
+ 644: "dwr",
+ 645: "pssc",
+ 646: "ldp",
+ 647: "dhcp-failover",
+ 648: "rrp",
+ 649: "cadview-3d",
+ 650: "obex",
+ 651: "ieee-mms",
+ 652: "hello-port",
+ 653: "repscmd",
+ 654: "aodv",
+ 655: "tinc",
+ 656: "spmp",
+ 657: "rmc",
+ 658: "tenfold",
+ 660: "mac-srvr-admin",
+ 661: "hap",
+ 662: "pftp",
+ 663: "purenoise",
+ 664: "oob-ws-https",
+ 665: "sun-dr",
+ 666: "mdqs",
+ 667: "disclose",
+ 668: "mecomm",
+ 669: "meregister",
+ 670: "vacdsm-sws",
+ 671: "vacdsm-app",
+ 672: "vpps-qua",
+ 673: "cimplex",
+ 674: "acap",
+ 675: "dctp",
+ 676: "vpps-via",
+ 677: "vpp",
+ 678: "ggf-ncp",
+ 679: "mrm",
+ 680: "entrust-aaas",
+ 681: "entrust-aams",
+ 682: "xfr",
+ 683: "corba-iiop",
+ 684: "corba-iiop-ssl",
+ 685: "mdc-portmapper",
+ 686: "hcp-wismar",
+ 687: "asipregistry",
+ 688: "realm-rusd",
+ 689: "nmap",
+ 690: "vatp",
+ 691: "msexch-routing",
+ 692: "hyperwave-isp",
+ 693: "connendp",
+ 694: "ha-cluster",
+ 695: "ieee-mms-ssl",
+ 696: "rushd",
+ 697: "uuidgen",
+ 698: "olsr",
+ 699: "accessnetwork",
+ 700: "epp",
+ 701: "lmp",
+ 702: "iris-beep",
+ 704: "elcsd",
+ 705: "agentx",
+ 706: "silc",
+ 707: "borland-dsj",
+ 709: "entrust-kmsh",
+ 710: "entrust-ash",
+ 711: "cisco-tdp",
+ 712: "tbrpf",
+ 713: "iris-xpc",
+ 714: "iris-xpcs",
+ 715: "iris-lwz",
+ 729: "netviewdm1",
+ 730: "netviewdm2",
+ 731: "netviewdm3",
+ 741: "netgw",
+ 742: "netrcs",
+ 744: "flexlm",
+ 747: "fujitsu-dev",
+ 748: "ris-cm",
+ 749: "kerberos-adm",
+ 750: "rfile",
+ 751: "pump",
+ 752: "qrh",
+ 753: "rrh",
+ 754: "tell",
+ 758: "nlogin",
+ 759: "con",
+ 760: "ns",
+ 761: "rxe",
+ 762: "quotad",
+ 763: "cycleserv",
+ 764: "omserv",
+ 765: "webster",
+ 767: "phonebook",
+ 769: "vid",
+ 770: "cadlock",
+ 771: "rtip",
+ 772: "cycleserv2",
+ 773: "submit",
+ 774: "rpasswd",
+ 775: "entomb",
+ 776: "wpages",
+ 777: "multiling-http",
+ 780: "wpgs",
+ 800: "mdbs-daemon",
+ 801: "device",
+ 802: "mbap-s",
+ 810: "fcp-udp",
+ 828: "itm-mcell-s",
+ 829: "pkix-3-ca-ra",
+ 830: "netconf-ssh",
+ 831: "netconf-beep",
+ 832: "netconfsoaphttp",
+ 833: "netconfsoapbeep",
+ 847: "dhcp-failover2",
+ 848: "gdoi",
+ 853: "domain-s",
+ 854: "dlep",
+ 860: "iscsi",
+ 861: "owamp-control",
+ 862: "twamp-control",
+ 873: "rsync",
+ 886: "iclcnet-locate",
+ 887: "iclcnet-svinfo",
+ 888: "accessbuilder",
+ 900: "omginitialrefs",
+ 901: "smpnameres",
+ 902: "ideafarm-door",
+ 903: "ideafarm-panic",
+ 910: "kink",
+ 911: "xact-backup",
+ 912: "apex-mesh",
+ 913: "apex-edge",
+ 953: "rndc",
+ 989: "ftps-data",
+ 990: "ftps",
+ 991: "nas",
+ 992: "telnets",
+ 993: "imaps",
+ 995: "pop3s",
+ 996: "vsinet",
+ 997: "maitrd",
+ 998: "busboy",
+ 999: "garcon",
+ 1000: "cadlock2",
+ 1001: "webpush",
+ 1010: "surf",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1025: "blackjack",
+ 1026: "cap",
+ 1029: "solid-mux",
+ 1033: "netinfo-local",
+ 1034: "activesync",
+ 1035: "mxxrlogin",
+ 1036: "nsstp",
+ 1037: "ams",
+ 1038: "mtqp",
+ 1039: "sbl",
+ 1040: "netarx",
+ 1041: "danf-ak2",
+ 1042: "afrog",
+ 1043: "boinc-client",
+ 1044: "dcutility",
+ 1045: "fpitp",
+ 1046: "wfremotertm",
+ 1047: "neod1",
+ 1048: "neod2",
+ 1049: "td-postman",
+ 1050: "cma",
+ 1051: "optima-vnet",
+ 1052: "ddt",
+ 1053: "remote-as",
+ 1054: "brvread",
+ 1055: "ansyslmd",
+ 1056: "vfo",
+ 1057: "startron",
+ 1058: "nim",
+ 1059: "nimreg",
+ 1060: "polestar",
+ 1061: "kiosk",
+ 1062: "veracity",
+ 1063: "kyoceranetdev",
+ 1064: "jstel",
+ 1065: "syscomlan",
+ 1066: "fpo-fns",
+ 1067: "instl-boots",
+ 1068: "instl-bootc",
+ 1069: "cognex-insight",
+ 1070: "gmrupdateserv",
+ 1071: "bsquare-voip",
+ 1072: "cardax",
+ 1073: "bridgecontrol",
+ 1074: "warmspotMgmt",
+ 1075: "rdrmshc",
+ 1076: "dab-sti-c",
+ 1077: "imgames",
+ 1078: "avocent-proxy",
+ 1079: "asprovatalk",
+ 1080: "socks",
+ 1081: "pvuniwien",
+ 1082: "amt-esd-prot",
+ 1083: "ansoft-lm-1",
+ 1084: "ansoft-lm-2",
+ 1085: "webobjects",
+ 1086: "cplscrambler-lg",
+ 1087: "cplscrambler-in",
+ 1088: "cplscrambler-al",
+ 1089: "ff-annunc",
+ 1090: "ff-fms",
+ 1091: "ff-sm",
+ 1092: "obrpd",
+ 1093: "proofd",
+ 1094: "rootd",
+ 1095: "nicelink",
+ 1096: "cnrprotocol",
+ 1097: "sunclustermgr",
+ 1098: "rmiactivation",
+ 1099: "rmiregistry",
+ 1100: "mctp",
+ 1101: "pt2-discover",
+ 1102: "adobeserver-1",
+ 1103: "adobeserver-2",
+ 1104: "xrl",
+ 1105: "ftranhc",
+ 1106: "isoipsigport-1",
+ 1107: "isoipsigport-2",
+ 1108: "ratio-adp",
+ 1110: "webadmstart",
+ 1111: "lmsocialserver",
+ 1112: "icp",
+ 1113: "ltp-deepspace",
+ 1114: "mini-sql",
+ 1115: "ardus-trns",
+ 1116: "ardus-cntl",
+ 1117: "ardus-mtrns",
+ 1118: "sacred",
+ 1119: "bnetgame",
+ 1120: "bnetfile",
+ 1121: "rmpp",
+ 1122: "availant-mgr",
+ 1123: "murray",
+ 1124: "hpvmmcontrol",
+ 1125: "hpvmmagent",
+ 1126: "hpvmmdata",
+ 1127: "kwdb-commn",
+ 1128: "saphostctrl",
+ 1129: "saphostctrls",
+ 1130: "casp",
+ 1131: "caspssl",
+ 1132: "kvm-via-ip",
+ 1133: "dfn",
+ 1134: "aplx",
+ 1135: "omnivision",
+ 1136: "hhb-gateway",
+ 1137: "trim",
+ 1138: "encrypted-admin",
+ 1139: "evm",
+ 1140: "autonoc",
+ 1141: "mxomss",
+ 1142: "edtools",
+ 1143: "imyx",
+ 1144: "fuscript",
+ 1145: "x9-icue",
+ 1146: "audit-transfer",
+ 1147: "capioverlan",
+ 1148: "elfiq-repl",
+ 1149: "bvtsonar",
+ 1150: "blaze",
+ 1151: "unizensus",
+ 1152: "winpoplanmess",
+ 1153: "c1222-acse",
+ 1154: "resacommunity",
+ 1155: "nfa",
+ 1156: "iascontrol-oms",
+ 1157: "iascontrol",
+ 1158: "dbcontrol-oms",
+ 1159: "oracle-oms",
+ 1160: "olsv",
+ 1161: "health-polling",
+ 1162: "health-trap",
+ 1163: "sddp",
+ 1164: "qsm-proxy",
+ 1165: "qsm-gui",
+ 1166: "qsm-remote",
+ 1167: "cisco-ipsla",
+ 1168: "vchat",
+ 1169: "tripwire",
+ 1170: "atc-lm",
+ 1171: "atc-appserver",
+ 1172: "dnap",
+ 1173: "d-cinema-rrp",
+ 1174: "fnet-remote-ui",
+ 1175: "dossier",
+ 1176: "indigo-server",
+ 1177: "dkmessenger",
+ 1178: "sgi-storman",
+ 1179: "b2n",
+ 1180: "mc-client",
+ 1181: "3comnetman",
+ 1182: "accelenet",
+ 1183: "llsurfup-http",
+ 1184: "llsurfup-https",
+ 1185: "catchpole",
+ 1186: "mysql-cluster",
+ 1187: "alias",
+ 1188: "hp-webadmin",
+ 1189: "unet",
+ 1190: "commlinx-avl",
+ 1191: "gpfs",
+ 1192: "caids-sensor",
+ 1193: "fiveacross",
+ 1194: "openvpn",
+ 1195: "rsf-1",
+ 1196: "netmagic",
+ 1197: "carrius-rshell",
+ 1198: "cajo-discovery",
+ 1199: "dmidi",
+ 1200: "scol",
+ 1201: "nucleus-sand",
+ 1202: "caiccipc",
+ 1203: "ssslic-mgr",
+ 1204: "ssslog-mgr",
+ 1205: "accord-mgc",
+ 1206: "anthony-data",
+ 1207: "metasage",
+ 1208: "seagull-ais",
+ 1209: "ipcd3",
+ 1210: "eoss",
+ 1211: "groove-dpp",
+ 1212: "lupa",
+ 1213: "mpc-lifenet",
+ 1214: "kazaa",
+ 1215: "scanstat-1",
+ 1216: "etebac5",
+ 1217: "hpss-ndapi",
+ 1218: "aeroflight-ads",
+ 1219: "aeroflight-ret",
+ 1220: "qt-serveradmin",
+ 1221: "sweetware-apps",
+ 1222: "nerv",
+ 1223: "tgp",
+ 1224: "vpnz",
+ 1225: "slinkysearch",
+ 1226: "stgxfws",
+ 1227: "dns2go",
+ 1228: "florence",
+ 1229: "zented",
+ 1230: "periscope",
+ 1231: "menandmice-lpm",
+ 1232: "first-defense",
+ 1233: "univ-appserver",
+ 1234: "search-agent",
+ 1235: "mosaicsyssvc1",
+ 1236: "bvcontrol",
+ 1237: "tsdos390",
+ 1238: "hacl-qs",
+ 1239: "nmsd",
+ 1240: "instantia",
+ 1241: "nessus",
+ 1242: "nmasoverip",
+ 1243: "serialgateway",
+ 1244: "isbconference1",
+ 1245: "isbconference2",
+ 1246: "payrouter",
+ 1247: "visionpyramid",
+ 1248: "hermes",
+ 1249: "mesavistaco",
+ 1250: "swldy-sias",
+ 1251: "servergraph",
+ 1252: "bspne-pcc",
+ 1253: "q55-pcc",
+ 1254: "de-noc",
+ 1255: "de-cache-query",
+ 1256: "de-server",
+ 1257: "shockwave2",
+ 1258: "opennl",
+ 1259: "opennl-voice",
+ 1260: "ibm-ssd",
+ 1261: "mpshrsv",
+ 1262: "qnts-orb",
+ 1263: "dka",
+ 1264: "prat",
+ 1265: "dssiapi",
+ 1266: "dellpwrappks",
+ 1267: "epc",
+ 1268: "propel-msgsys",
+ 1269: "watilapp",
+ 1270: "opsmgr",
+ 1271: "excw",
+ 1272: "cspmlockmgr",
+ 1273: "emc-gateway",
+ 1274: "t1distproc",
+ 1275: "ivcollector",
+ 1277: "miva-mqs",
+ 1278: "dellwebadmin-1",
+ 1279: "dellwebadmin-2",
+ 1280: "pictrography",
+ 1281: "healthd",
+ 1282: "emperion",
+ 1283: "productinfo",
+ 1284: "iee-qfx",
+ 1285: "neoiface",
+ 1286: "netuitive",
+ 1287: "routematch",
+ 1288: "navbuddy",
+ 1289: "jwalkserver",
+ 1290: "winjaserver",
+ 1291: "seagulllms",
+ 1292: "dsdn",
+ 1293: "pkt-krb-ipsec",
+ 1294: "cmmdriver",
+ 1295: "ehtp",
+ 1296: "dproxy",
+ 1297: "sdproxy",
+ 1298: "lpcp",
+ 1299: "hp-sci",
+ 1300: "h323hostcallsc",
+ 1301: "ci3-software-1",
+ 1302: "ci3-software-2",
+ 1303: "sftsrv",
+ 1304: "boomerang",
+ 1305: "pe-mike",
+ 1306: "re-conn-proto",
+ 1307: "pacmand",
+ 1308: "odsi",
+ 1309: "jtag-server",
+ 1310: "husky",
+ 1311: "rxmon",
+ 1312: "sti-envision",
+ 1313: "bmc-patroldb",
+ 1314: "pdps",
+ 1315: "els",
+ 1316: "exbit-escp",
+ 1317: "vrts-ipcserver",
+ 1318: "krb5gatekeeper",
+ 1319: "amx-icsp",
+ 1320: "amx-axbnet",
+ 1321: "pip",
+ 1322: "novation",
+ 1323: "brcd",
+ 1324: "delta-mcp",
+ 1325: "dx-instrument",
+ 1326: "wimsic",
+ 1327: "ultrex",
+ 1328: "ewall",
+ 1329: "netdb-export",
+ 1330: "streetperfect",
+ 1331: "intersan",
+ 1332: "pcia-rxp-b",
+ 1333: "passwrd-policy",
+ 1334: "writesrv",
+ 1335: "digital-notary",
+ 1336: "ischat",
+ 1337: "menandmice-dns",
+ 1338: "wmc-log-svc",
+ 1339: "kjtsiteserver",
+ 1340: "naap",
+ 1341: "qubes",
+ 1342: "esbroker",
+ 1343: "re101",
+ 1344: "icap",
+ 1345: "vpjp",
+ 1346: "alta-ana-lm",
+ 1347: "bbn-mmc",
+ 1348: "bbn-mmx",
+ 1349: "sbook",
+ 1350: "editbench",
+ 1351: "equationbuilder",
+ 1352: "lotusnote",
+ 1353: "relief",
+ 1354: "XSIP-network",
+ 1355: "intuitive-edge",
+ 1356: "cuillamartin",
+ 1357: "pegboard",
+ 1358: "connlcli",
+ 1359: "ftsrv",
+ 1360: "mimer",
+ 1361: "linx",
+ 1362: "timeflies",
+ 1363: "ndm-requester",
+ 1364: "ndm-server",
+ 1365: "adapt-sna",
+ 1366: "netware-csp",
+ 1367: "dcs",
+ 1368: "screencast",
+ 1369: "gv-us",
+ 1370: "us-gv",
+ 1371: "fc-cli",
+ 1372: "fc-ser",
+ 1373: "chromagrafx",
+ 1374: "molly",
+ 1375: "bytex",
+ 1376: "ibm-pps",
+ 1377: "cichlid",
+ 1378: "elan",
+ 1379: "dbreporter",
+ 1380: "telesis-licman",
+ 1381: "apple-licman",
+ 1382: "udt-os",
+ 1383: "gwha",
+ 1384: "os-licman",
+ 1385: "atex-elmd",
+ 1386: "checksum",
+ 1387: "cadsi-lm",
+ 1388: "objective-dbc",
+ 1389: "iclpv-dm",
+ 1390: "iclpv-sc",
+ 1391: "iclpv-sas",
+ 1392: "iclpv-pm",
+ 1393: "iclpv-nls",
+ 1394: "iclpv-nlc",
+ 1395: "iclpv-wsm",
+ 1396: "dvl-activemail",
+ 1397: "audio-activmail",
+ 1398: "video-activmail",
+ 1399: "cadkey-licman",
+ 1400: "cadkey-tablet",
+ 1401: "goldleaf-licman",
+ 1402: "prm-sm-np",
+ 1403: "prm-nm-np",
+ 1404: "igi-lm",
+ 1405: "ibm-res",
+ 1406: "netlabs-lm",
+ 1407: "tibet-server",
+ 1408: "sophia-lm",
+ 1409: "here-lm",
+ 1410: "hiq",
+ 1411: "af",
+ 1412: "innosys",
+ 1413: "innosys-acl",
+ 1414: "ibm-mqseries",
+ 1415: "dbstar",
+ 1416: "novell-lu6-2",
+ 1417: "timbuktu-srv1",
+ 1418: "timbuktu-srv2",
+ 1419: "timbuktu-srv3",
+ 1420: "timbuktu-srv4",
+ 1421: "gandalf-lm",
+ 1422: "autodesk-lm",
+ 1423: "essbase",
+ 1424: "hybrid",
+ 1425: "zion-lm",
+ 1426: "sais",
+ 1427: "mloadd",
+ 1428: "informatik-lm",
+ 1429: "nms",
+ 1430: "tpdu",
+ 1431: "rgtp",
+ 1432: "blueberry-lm",
+ 1433: "ms-sql-s",
+ 1434: "ms-sql-m",
+ 1435: "ibm-cics",
+ 1436: "saism",
+ 1437: "tabula",
+ 1438: "eicon-server",
+ 1439: "eicon-x25",
+ 1440: "eicon-slp",
+ 1441: "cadis-1",
+ 1442: "cadis-2",
+ 1443: "ies-lm",
+ 1444: "marcam-lm",
+ 1445: "proxima-lm",
+ 1446: "ora-lm",
+ 1447: "apri-lm",
+ 1448: "oc-lm",
+ 1449: "peport",
+ 1450: "dwf",
+ 1451: "infoman",
+ 1452: "gtegsc-lm",
+ 1453: "genie-lm",
+ 1454: "interhdl-elmd",
+ 1455: "esl-lm",
+ 1456: "dca",
+ 1457: "valisys-lm",
+ 1458: "nrcabq-lm",
+ 1459: "proshare1",
+ 1460: "proshare2",
+ 1461: "ibm-wrless-lan",
+ 1462: "world-lm",
+ 1463: "nucleus",
+ 1464: "msl-lmd",
+ 1465: "pipes",
+ 1466: "oceansoft-lm",
+ 1467: "csdmbase",
+ 1468: "csdm",
+ 1469: "aal-lm",
+ 1470: "uaiact",
+ 1471: "csdmbase",
+ 1472: "csdm",
+ 1473: "openmath",
+ 1474: "telefinder",
+ 1475: "taligent-lm",
+ 1476: "clvm-cfg",
+ 1477: "ms-sna-server",
+ 1478: "ms-sna-base",
+ 1479: "dberegister",
+ 1480: "pacerforum",
+ 1481: "airs",
+ 1482: "miteksys-lm",
+ 1483: "afs",
+ 1484: "confluent",
+ 1485: "lansource",
+ 1486: "nms-topo-serv",
+ 1487: "localinfosrvr",
+ 1488: "docstor",
+ 1489: "dmdocbroker",
+ 1490: "insitu-conf",
+ 1492: "stone-design-1",
+ 1493: "netmap-lm",
+ 1494: "ica",
+ 1495: "cvc",
+ 1496: "liberty-lm",
+ 1497: "rfx-lm",
+ 1498: "sybase-sqlany",
+ 1499: "fhc",
+ 1500: "vlsi-lm",
+ 1501: "saiscm",
+ 1502: "shivadiscovery",
+ 1503: "imtc-mcs",
+ 1504: "evb-elm",
+ 1505: "funkproxy",
+ 1506: "utcd",
+ 1507: "symplex",
+ 1508: "diagmond",
+ 1509: "robcad-lm",
+ 1510: "mvx-lm",
+ 1511: "3l-l1",
+ 1512: "wins",
+ 1513: "fujitsu-dtc",
+ 1514: "fujitsu-dtcns",
+ 1515: "ifor-protocol",
+ 1516: "vpad",
+ 1517: "vpac",
+ 1518: "vpvd",
+ 1519: "vpvc",
+ 1520: "atm-zip-office",
+ 1521: "ncube-lm",
+ 1522: "ricardo-lm",
+ 1523: "cichild-lm",
+ 1524: "ingreslock",
+ 1525: "orasrv",
+ 1526: "pdap-np",
+ 1527: "tlisrv",
+ 1529: "coauthor",
+ 1530: "rap-service",
+ 1531: "rap-listen",
+ 1532: "miroconnect",
+ 1533: "virtual-places",
+ 1534: "micromuse-lm",
+ 1535: "ampr-info",
+ 1536: "ampr-inter",
+ 1537: "sdsc-lm",
+ 1538: "3ds-lm",
+ 1539: "intellistor-lm",
+ 1540: "rds",
+ 1541: "rds2",
+ 1542: "gridgen-elmd",
+ 1543: "simba-cs",
+ 1544: "aspeclmd",
+ 1545: "vistium-share",
+ 1546: "abbaccuray",
+ 1547: "laplink",
+ 1548: "axon-lm",
+ 1549: "shivahose",
+ 1550: "3m-image-lm",
+ 1551: "hecmtl-db",
+ 1552: "pciarray",
+ 1553: "sna-cs",
+ 1554: "caci-lm",
+ 1555: "livelan",
+ 1556: "veritas-pbx",
+ 1557: "arbortext-lm",
+ 1558: "xingmpeg",
+ 1559: "web2host",
+ 1560: "asci-val",
+ 1561: "facilityview",
+ 1562: "pconnectmgr",
+ 1563: "cadabra-lm",
+ 1564: "pay-per-view",
+ 1565: "winddlb",
+ 1566: "corelvideo",
+ 1567: "jlicelmd",
+ 1568: "tsspmap",
+ 1569: "ets",
+ 1570: "orbixd",
+ 1571: "rdb-dbs-disp",
+ 1572: "chip-lm",
+ 1573: "itscomm-ns",
+ 1574: "mvel-lm",
+ 1575: "oraclenames",
+ 1576: "moldflow-lm",
+ 1577: "hypercube-lm",
+ 1578: "jacobus-lm",
+ 1579: "ioc-sea-lm",
+ 1580: "tn-tl-r1",
+ 1581: "mil-2045-47001",
+ 1582: "msims",
+ 1583: "simbaexpress",
+ 1584: "tn-tl-fd2",
+ 1585: "intv",
+ 1586: "ibm-abtact",
+ 1587: "pra-elmd",
+ 1588: "triquest-lm",
+ 1589: "vqp",
+ 1590: "gemini-lm",
+ 1591: "ncpm-pm",
+ 1592: "commonspace",
+ 1593: "mainsoft-lm",
+ 1594: "sixtrak",
+ 1595: "radio",
+ 1596: "radio-sm",
+ 1597: "orbplus-iiop",
+ 1598: "picknfs",
+ 1599: "simbaservices",
+ 1600: "issd",
+ 1601: "aas",
+ 1602: "inspect",
+ 1603: "picodbc",
+ 1604: "icabrowser",
+ 1605: "slp",
+ 1606: "slm-api",
+ 1607: "stt",
+ 1608: "smart-lm",
+ 1609: "isysg-lm",
+ 1610: "taurus-wh",
+ 1611: "ill",
+ 1612: "netbill-trans",
+ 1613: "netbill-keyrep",
+ 1614: "netbill-cred",
+ 1615: "netbill-auth",
+ 1616: "netbill-prod",
+ 1617: "nimrod-agent",
+ 1618: "skytelnet",
+ 1619: "xs-openstorage",
+ 1620: "faxportwinport",
+ 1621: "softdataphone",
+ 1622: "ontime",
+ 1623: "jaleosnd",
+ 1624: "udp-sr-port",
+ 1625: "svs-omagent",
+ 1626: "shockwave",
+ 1627: "t128-gateway",
+ 1628: "lontalk-norm",
+ 1629: "lontalk-urgnt",
+ 1630: "oraclenet8cman",
+ 1631: "visitview",
+ 1632: "pammratc",
+ 1633: "pammrpc",
+ 1634: "loaprobe",
+ 1635: "edb-server1",
+ 1636: "isdc",
+ 1637: "islc",
+ 1638: "ismc",
+ 1639: "cert-initiator",
+ 1640: "cert-responder",
+ 1641: "invision",
+ 1642: "isis-am",
+ 1643: "isis-ambc",
+ 1644: "saiseh",
+ 1645: "sightline",
+ 1646: "sa-msg-port",
+ 1647: "rsap",
+ 1648: "concurrent-lm",
+ 1649: "kermit",
+ 1650: "nkd",
+ 1651: "shiva-confsrvr",
+ 1652: "xnmp",
+ 1653: "alphatech-lm",
+ 1654: "stargatealerts",
+ 1655: "dec-mbadmin",
+ 1656: "dec-mbadmin-h",
+ 1657: "fujitsu-mmpdc",
+ 1658: "sixnetudr",
+ 1659: "sg-lm",
+ 1660: "skip-mc-gikreq",
+ 1661: "netview-aix-1",
+ 1662: "netview-aix-2",
+ 1663: "netview-aix-3",
+ 1664: "netview-aix-4",
+ 1665: "netview-aix-5",
+ 1666: "netview-aix-6",
+ 1667: "netview-aix-7",
+ 1668: "netview-aix-8",
+ 1669: "netview-aix-9",
+ 1670: "netview-aix-10",
+ 1671: "netview-aix-11",
+ 1672: "netview-aix-12",
+ 1673: "proshare-mc-1",
+ 1674: "proshare-mc-2",
+ 1675: "pdp",
+ 1676: "netcomm1",
+ 1677: "groupwise",
+ 1678: "prolink",
+ 1679: "darcorp-lm",
+ 1680: "microcom-sbp",
+ 1681: "sd-elmd",
+ 1682: "lanyon-lantern",
+ 1683: "ncpm-hip",
+ 1684: "snaresecure",
+ 1685: "n2nremote",
+ 1686: "cvmon",
+ 1687: "nsjtp-ctrl",
+ 1688: "nsjtp-data",
+ 1689: "firefox",
+ 1690: "ng-umds",
+ 1691: "empire-empuma",
+ 1692: "sstsys-lm",
+ 1693: "rrirtr",
+ 1694: "rrimwm",
+ 1695: "rrilwm",
+ 1696: "rrifmm",
+ 1697: "rrisat",
+ 1698: "rsvp-encap-1",
+ 1699: "rsvp-encap-2",
+ 1700: "mps-raft",
+ 1701: "l2f",
+ 1702: "deskshare",
+ 1703: "hb-engine",
+ 1704: "bcs-broker",
+ 1705: "slingshot",
+ 1706: "jetform",
+ 1707: "vdmplay",
+ 1708: "gat-lmd",
+ 1709: "centra",
+ 1710: "impera",
+ 1711: "pptconference",
+ 1712: "registrar",
+ 1713: "conferencetalk",
+ 1714: "sesi-lm",
+ 1715: "houdini-lm",
+ 1716: "xmsg",
+ 1717: "fj-hdnet",
+ 1718: "h323gatedisc",
+ 1719: "h323gatestat",
+ 1720: "h323hostcall",
+ 1721: "caicci",
+ 1722: "hks-lm",
+ 1723: "pptp",
+ 1724: "csbphonemaster",
+ 1725: "iden-ralp",
+ 1726: "iberiagames",
+ 1727: "winddx",
+ 1728: "telindus",
+ 1729: "citynl",
+ 1730: "roketz",
+ 1731: "msiccp",
+ 1732: "proxim",
+ 1733: "siipat",
+ 1734: "cambertx-lm",
+ 1735: "privatechat",
+ 1736: "street-stream",
+ 1737: "ultimad",
+ 1738: "gamegen1",
+ 1739: "webaccess",
+ 1740: "encore",
+ 1741: "cisco-net-mgmt",
+ 1742: "3Com-nsd",
+ 1743: "cinegrfx-lm",
+ 1744: "ncpm-ft",
+ 1745: "remote-winsock",
+ 1746: "ftrapid-1",
+ 1747: "ftrapid-2",
+ 1748: "oracle-em1",
+ 1749: "aspen-services",
+ 1750: "sslp",
+ 1751: "swiftnet",
+ 1752: "lofr-lm",
+ 1753: "predatar-comms",
+ 1754: "oracle-em2",
+ 1755: "ms-streaming",
+ 1756: "capfast-lmd",
+ 1757: "cnhrp",
+ 1758: "tftp-mcast",
+ 1759: "spss-lm",
+ 1760: "www-ldap-gw",
+ 1761: "cft-0",
+ 1762: "cft-1",
+ 1763: "cft-2",
+ 1764: "cft-3",
+ 1765: "cft-4",
+ 1766: "cft-5",
+ 1767: "cft-6",
+ 1768: "cft-7",
+ 1769: "bmc-net-adm",
+ 1770: "bmc-net-svc",
+ 1771: "vaultbase",
+ 1772: "essweb-gw",
+ 1773: "kmscontrol",
+ 1774: "global-dtserv",
+ 1775: "vdab",
+ 1776: "femis",
+ 1777: "powerguardian",
+ 1778: "prodigy-intrnet",
+ 1779: "pharmasoft",
+ 1780: "dpkeyserv",
+ 1781: "answersoft-lm",
+ 1782: "hp-hcip",
+ 1784: "finle-lm",
+ 1785: "windlm",
+ 1786: "funk-logger",
+ 1787: "funk-license",
+ 1788: "psmond",
+ 1789: "hello",
+ 1790: "nmsp",
+ 1791: "ea1",
+ 1792: "ibm-dt-2",
+ 1793: "rsc-robot",
+ 1794: "cera-bcm",
+ 1795: "dpi-proxy",
+ 1796: "vocaltec-admin",
+ 1797: "uma",
+ 1798: "etp",
+ 1799: "netrisk",
+ 1800: "ansys-lm",
+ 1801: "msmq",
+ 1802: "concomp1",
+ 1803: "hp-hcip-gwy",
+ 1804: "enl",
+ 1805: "enl-name",
+ 1806: "musiconline",
+ 1807: "fhsp",
+ 1808: "oracle-vp2",
+ 1809: "oracle-vp1",
+ 1810: "jerand-lm",
+ 1811: "scientia-sdb",
+ 1812: "radius",
+ 1813: "radius-acct",
+ 1814: "tdp-suite",
+ 1815: "mmpft",
+ 1816: "harp",
+ 1817: "rkb-oscs",
+ 1818: "etftp",
+ 1819: "plato-lm",
+ 1820: "mcagent",
+ 1821: "donnyworld",
+ 1822: "es-elmd",
+ 1823: "unisys-lm",
+ 1824: "metrics-pas",
+ 1825: "direcpc-video",
+ 1826: "ardt",
+ 1827: "asi",
+ 1828: "itm-mcell-u",
+ 1829: "optika-emedia",
+ 1830: "net8-cman",
+ 1831: "myrtle",
+ 1832: "tht-treasure",
+ 1833: "udpradio",
+ 1834: "ardusuni",
+ 1835: "ardusmul",
+ 1836: "ste-smsc",
+ 1837: "csoft1",
+ 1838: "talnet",
+ 1839: "netopia-vo1",
+ 1840: "netopia-vo2",
+ 1841: "netopia-vo3",
+ 1842: "netopia-vo4",
+ 1843: "netopia-vo5",
+ 1844: "direcpc-dll",
+ 1845: "altalink",
+ 1846: "tunstall-pnc",
+ 1847: "slp-notify",
+ 1848: "fjdocdist",
+ 1849: "alpha-sms",
+ 1850: "gsi",
+ 1851: "ctcd",
+ 1852: "virtual-time",
+ 1853: "vids-avtp",
+ 1854: "buddy-draw",
+ 1855: "fiorano-rtrsvc",
+ 1856: "fiorano-msgsvc",
+ 1857: "datacaptor",
+ 1858: "privateark",
+ 1859: "gammafetchsvr",
+ 1860: "sunscalar-svc",
+ 1861: "lecroy-vicp",
+ 1862: "mysql-cm-agent",
+ 1863: "msnp",
+ 1864: "paradym-31port",
+ 1865: "entp",
+ 1866: "swrmi",
+ 1867: "udrive",
+ 1868: "viziblebrowser",
+ 1869: "transact",
+ 1870: "sunscalar-dns",
+ 1871: "canocentral0",
+ 1872: "canocentral1",
+ 1873: "fjmpjps",
+ 1874: "fjswapsnp",
+ 1875: "westell-stats",
+ 1876: "ewcappsrv",
+ 1877: "hp-webqosdb",
+ 1878: "drmsmc",
+ 1879: "nettgain-nms",
+ 1880: "vsat-control",
+ 1881: "ibm-mqseries2",
+ 1882: "ecsqdmn",
+ 1883: "mqtt",
+ 1884: "idmaps",
+ 1885: "vrtstrapserver",
+ 1886: "leoip",
+ 1887: "filex-lport",
+ 1888: "ncconfig",
+ 1889: "unify-adapter",
+ 1890: "wilkenlistener",
+ 1891: "childkey-notif",
+ 1892: "childkey-ctrl",
+ 1893: "elad",
+ 1894: "o2server-port",
+ 1896: "b-novative-ls",
+ 1897: "metaagent",
+ 1898: "cymtec-port",
+ 1899: "mc2studios",
+ 1900: "ssdp",
+ 1901: "fjicl-tep-a",
+ 1902: "fjicl-tep-b",
+ 1903: "linkname",
+ 1904: "fjicl-tep-c",
+ 1905: "sugp",
+ 1906: "tpmd",
+ 1907: "intrastar",
+ 1908: "dawn",
+ 1909: "global-wlink",
+ 1910: "ultrabac",
+ 1911: "mtp",
+ 1912: "rhp-iibp",
+ 1913: "armadp",
+ 1914: "elm-momentum",
+ 1915: "facelink",
+ 1916: "persona",
+ 1917: "noagent",
+ 1918: "can-nds",
+ 1919: "can-dch",
+ 1920: "can-ferret",
+ 1921: "noadmin",
+ 1922: "tapestry",
+ 1923: "spice",
+ 1924: "xiip",
+ 1925: "discovery-port",
+ 1926: "egs",
+ 1927: "videte-cipc",
+ 1928: "emsd-port",
+ 1929: "bandwiz-system",
+ 1930: "driveappserver",
+ 1931: "amdsched",
+ 1932: "ctt-broker",
+ 1933: "xmapi",
+ 1934: "xaapi",
+ 1935: "macromedia-fcs",
+ 1936: "jetcmeserver",
+ 1937: "jwserver",
+ 1938: "jwclient",
+ 1939: "jvserver",
+ 1940: "jvclient",
+ 1941: "dic-aida",
+ 1942: "res",
+ 1943: "beeyond-media",
+ 1944: "close-combat",
+ 1945: "dialogic-elmd",
+ 1946: "tekpls",
+ 1947: "sentinelsrm",
+ 1948: "eye2eye",
+ 1949: "ismaeasdaqlive",
+ 1950: "ismaeasdaqtest",
+ 1951: "bcs-lmserver",
+ 1952: "mpnjsc",
+ 1953: "rapidbase",
+ 1954: "abr-api",
+ 1955: "abr-secure",
+ 1956: "vrtl-vmf-ds",
+ 1957: "unix-status",
+ 1958: "dxadmind",
+ 1959: "simp-all",
+ 1960: "nasmanager",
+ 1961: "bts-appserver",
+ 1962: "biap-mp",
+ 1963: "webmachine",
+ 1964: "solid-e-engine",
+ 1965: "tivoli-npm",
+ 1966: "slush",
+ 1967: "sns-quote",
+ 1968: "lipsinc",
+ 1969: "lipsinc1",
+ 1970: "netop-rc",
+ 1971: "netop-school",
+ 1972: "intersys-cache",
+ 1973: "dlsrap",
+ 1974: "drp",
+ 1975: "tcoflashagent",
+ 1976: "tcoregagent",
+ 1977: "tcoaddressbook",
+ 1978: "unisql",
+ 1979: "unisql-java",
+ 1980: "pearldoc-xact",
+ 1981: "p2pq",
+ 1982: "estamp",
+ 1983: "lhtp",
+ 1984: "bb",
+ 1985: "hsrp",
+ 1986: "licensedaemon",
+ 1987: "tr-rsrb-p1",
+ 1988: "tr-rsrb-p2",
+ 1989: "tr-rsrb-p3",
+ 1990: "stun-p1",
+ 1991: "stun-p2",
+ 1992: "stun-p3",
+ 1993: "snmp-tcp-port",
+ 1994: "stun-port",
+ 1995: "perf-port",
+ 1996: "tr-rsrb-port",
+ 1997: "gdp-port",
+ 1998: "x25-svc-port",
+ 1999: "tcp-id-port",
+ 2000: "cisco-sccp",
+ 2001: "dc",
+ 2002: "globe",
+ 2003: "brutus",
+ 2004: "mailbox",
+ 2005: "berknet",
+ 2006: "invokator",
+ 2007: "dectalk",
+ 2008: "conf",
+ 2009: "news",
+ 2010: "search",
+ 2011: "raid-cc",
+ 2012: "ttyinfo",
+ 2013: "raid-am",
+ 2014: "troff",
+ 2015: "cypress",
+ 2016: "bootserver",
+ 2017: "cypress-stat",
+ 2018: "terminaldb",
+ 2019: "whosockami",
+ 2020: "xinupageserver",
+ 2021: "servexec",
+ 2022: "down",
+ 2023: "xinuexpansion3",
+ 2024: "xinuexpansion4",
+ 2025: "ellpack",
+ 2026: "scrabble",
+ 2027: "shadowserver",
+ 2028: "submitserver",
+ 2029: "hsrpv6",
+ 2030: "device2",
+ 2031: "mobrien-chat",
+ 2032: "blackboard",
+ 2033: "glogger",
+ 2034: "scoremgr",
+ 2035: "imsldoc",
+ 2036: "e-dpnet",
+ 2037: "applus",
+ 2038: "objectmanager",
+ 2039: "prizma",
+ 2040: "lam",
+ 2041: "interbase",
+ 2042: "isis",
+ 2043: "isis-bcast",
+ 2044: "rimsl",
+ 2045: "cdfunc",
+ 2046: "sdfunc",
+ 2047: "dls",
+ 2048: "dls-monitor",
+ 2049: "shilp",
+ 2050: "av-emb-config",
+ 2051: "epnsdp",
+ 2052: "clearvisn",
+ 2053: "lot105-ds-upd",
+ 2054: "weblogin",
+ 2055: "iop",
+ 2056: "omnisky",
+ 2057: "rich-cp",
+ 2058: "newwavesearch",
+ 2059: "bmc-messaging",
+ 2060: "teleniumdaemon",
+ 2061: "netmount",
+ 2062: "icg-swp",
+ 2063: "icg-bridge",
+ 2064: "icg-iprelay",
+ 2065: "dlsrpn",
+ 2066: "aura",
+ 2067: "dlswpn",
+ 2068: "avauthsrvprtcl",
+ 2069: "event-port",
+ 2070: "ah-esp-encap",
+ 2071: "acp-port",
+ 2072: "msync",
+ 2073: "gxs-data-port",
+ 2074: "vrtl-vmf-sa",
+ 2075: "newlixengine",
+ 2076: "newlixconfig",
+ 2077: "tsrmagt",
+ 2078: "tpcsrvr",
+ 2079: "idware-router",
+ 2080: "autodesk-nlm",
+ 2081: "kme-trap-port",
+ 2082: "infowave",
+ 2083: "radsec",
+ 2084: "sunclustergeo",
+ 2085: "ada-cip",
+ 2086: "gnunet",
+ 2087: "eli",
+ 2088: "ip-blf",
+ 2089: "sep",
+ 2090: "lrp",
+ 2091: "prp",
+ 2092: "descent3",
+ 2093: "nbx-cc",
+ 2094: "nbx-au",
+ 2095: "nbx-ser",
+ 2096: "nbx-dir",
+ 2097: "jetformpreview",
+ 2098: "dialog-port",
+ 2099: "h2250-annex-g",
+ 2100: "amiganetfs",
+ 2101: "rtcm-sc104",
+ 2102: "zephyr-srv",
+ 2103: "zephyr-clt",
+ 2104: "zephyr-hm",
+ 2105: "minipay",
+ 2106: "mzap",
+ 2107: "bintec-admin",
+ 2108: "comcam",
+ 2109: "ergolight",
+ 2110: "umsp",
+ 2111: "dsatp",
+ 2112: "idonix-metanet",
+ 2113: "hsl-storm",
+ 2114: "newheights",
+ 2115: "kdm",
+ 2116: "ccowcmr",
+ 2117: "mentaclient",
+ 2118: "mentaserver",
+ 2119: "gsigatekeeper",
+ 2120: "qencp",
+ 2121: "scientia-ssdb",
+ 2122: "caupc-remote",
+ 2123: "gtp-control",
+ 2124: "elatelink",
+ 2125: "lockstep",
+ 2126: "pktcable-cops",
+ 2127: "index-pc-wb",
+ 2128: "net-steward",
+ 2129: "cs-live",
+ 2130: "xds",
+ 2131: "avantageb2b",
+ 2132: "solera-epmap",
+ 2133: "zymed-zpp",
+ 2134: "avenue",
+ 2135: "gris",
+ 2136: "appworxsrv",
+ 2137: "connect",
+ 2138: "unbind-cluster",
+ 2139: "ias-auth",
+ 2140: "ias-reg",
+ 2141: "ias-admind",
+ 2142: "tdmoip",
+ 2143: "lv-jc",
+ 2144: "lv-ffx",
+ 2145: "lv-pici",
+ 2146: "lv-not",
+ 2147: "lv-auth",
+ 2148: "veritas-ucl",
+ 2149: "acptsys",
+ 2150: "dynamic3d",
+ 2151: "docent",
+ 2152: "gtp-user",
+ 2153: "ctlptc",
+ 2154: "stdptc",
+ 2155: "brdptc",
+ 2156: "trp",
+ 2157: "xnds",
+ 2158: "touchnetplus",
+ 2159: "gdbremote",
+ 2160: "apc-2160",
+ 2161: "apc-2161",
+ 2162: "navisphere",
+ 2163: "navisphere-sec",
+ 2164: "ddns-v3",
+ 2165: "x-bone-api",
+ 2166: "iwserver",
+ 2167: "raw-serial",
+ 2168: "easy-soft-mux",
+ 2169: "brain",
+ 2170: "eyetv",
+ 2171: "msfw-storage",
+ 2172: "msfw-s-storage",
+ 2173: "msfw-replica",
+ 2174: "msfw-array",
+ 2175: "airsync",
+ 2176: "rapi",
+ 2177: "qwave",
+ 2178: "bitspeer",
+ 2179: "vmrdp",
+ 2180: "mc-gt-srv",
+ 2181: "eforward",
+ 2182: "cgn-stat",
+ 2183: "cgn-config",
+ 2184: "nvd",
+ 2185: "onbase-dds",
+ 2186: "gtaua",
+ 2187: "ssmc",
+ 2188: "radware-rpm",
+ 2189: "radware-rpm-s",
+ 2190: "tivoconnect",
+ 2191: "tvbus",
+ 2192: "asdis",
+ 2193: "drwcs",
+ 2197: "mnp-exchange",
+ 2198: "onehome-remote",
+ 2199: "onehome-help",
+ 2200: "ici",
+ 2201: "ats",
+ 2202: "imtc-map",
+ 2203: "b2-runtime",
+ 2204: "b2-license",
+ 2205: "jps",
+ 2206: "hpocbus",
+ 2207: "hpssd",
+ 2208: "hpiod",
+ 2209: "rimf-ps",
+ 2210: "noaaport",
+ 2211: "emwin",
+ 2212: "leecoposserver",
+ 2213: "kali",
+ 2214: "rpi",
+ 2215: "ipcore",
+ 2216: "vtu-comms",
+ 2217: "gotodevice",
+ 2218: "bounzza",
+ 2219: "netiq-ncap",
+ 2220: "netiq",
+ 2221: "ethernet-ip-s",
+ 2222: "EtherNet-IP-1",
+ 2223: "rockwell-csp2",
+ 2224: "efi-mg",
+ 2225: "rcip-itu",
+ 2226: "di-drm",
+ 2227: "di-msg",
+ 2228: "ehome-ms",
+ 2229: "datalens",
+ 2230: "queueadm",
+ 2231: "wimaxasncp",
+ 2232: "ivs-video",
+ 2233: "infocrypt",
+ 2234: "directplay",
+ 2235: "sercomm-wlink",
+ 2236: "nani",
+ 2237: "optech-port1-lm",
+ 2238: "aviva-sna",
+ 2239: "imagequery",
+ 2240: "recipe",
+ 2241: "ivsd",
+ 2242: "foliocorp",
+ 2243: "magicom",
+ 2244: "nmsserver",
+ 2245: "hao",
+ 2246: "pc-mta-addrmap",
+ 2247: "antidotemgrsvr",
+ 2248: "ums",
+ 2249: "rfmp",
+ 2250: "remote-collab",
+ 2251: "dif-port",
+ 2252: "njenet-ssl",
+ 2253: "dtv-chan-req",
+ 2254: "seispoc",
+ 2255: "vrtp",
+ 2256: "pcc-mfp",
+ 2257: "simple-tx-rx",
+ 2258: "rcts",
+ 2260: "apc-2260",
+ 2261: "comotionmaster",
+ 2262: "comotionback",
+ 2263: "ecwcfg",
+ 2264: "apx500api-1",
+ 2265: "apx500api-2",
+ 2266: "mfserver",
+ 2267: "ontobroker",
+ 2268: "amt",
+ 2269: "mikey",
+ 2270: "starschool",
+ 2271: "mmcals",
+ 2272: "mmcal",
+ 2273: "mysql-im",
+ 2274: "pcttunnell",
+ 2275: "ibridge-data",
+ 2276: "ibridge-mgmt",
+ 2277: "bluectrlproxy",
+ 2278: "s3db",
+ 2279: "xmquery",
+ 2280: "lnvpoller",
+ 2281: "lnvconsole",
+ 2282: "lnvalarm",
+ 2283: "lnvstatus",
+ 2284: "lnvmaps",
+ 2285: "lnvmailmon",
+ 2286: "nas-metering",
+ 2287: "dna",
+ 2288: "netml",
+ 2289: "dict-lookup",
+ 2290: "sonus-logging",
+ 2291: "eapsp",
+ 2292: "mib-streaming",
+ 2293: "npdbgmngr",
+ 2294: "konshus-lm",
+ 2295: "advant-lm",
+ 2296: "theta-lm",
+ 2297: "d2k-datamover1",
+ 2298: "d2k-datamover2",
+ 2299: "pc-telecommute",
+ 2300: "cvmmon",
+ 2301: "cpq-wbem",
+ 2302: "binderysupport",
+ 2303: "proxy-gateway",
+ 2304: "attachmate-uts",
+ 2305: "mt-scaleserver",
+ 2306: "tappi-boxnet",
+ 2307: "pehelp",
+ 2308: "sdhelp",
+ 2309: "sdserver",
+ 2310: "sdclient",
+ 2311: "messageservice",
+ 2312: "wanscaler",
+ 2313: "iapp",
+ 2314: "cr-websystems",
+ 2315: "precise-sft",
+ 2316: "sent-lm",
+ 2317: "attachmate-g32",
+ 2318: "cadencecontrol",
+ 2319: "infolibria",
+ 2320: "siebel-ns",
+ 2321: "rdlap",
+ 2322: "ofsd",
+ 2323: "3d-nfsd",
+ 2324: "cosmocall",
+ 2325: "ansysli",
+ 2326: "idcp",
+ 2327: "xingcsm",
+ 2328: "netrix-sftm",
+ 2329: "nvd",
+ 2330: "tscchat",
+ 2331: "agentview",
+ 2332: "rcc-host",
+ 2333: "snapp",
+ 2334: "ace-client",
+ 2335: "ace-proxy",
+ 2336: "appleugcontrol",
+ 2337: "ideesrv",
+ 2338: "norton-lambert",
+ 2339: "3com-webview",
+ 2340: "wrs-registry",
+ 2341: "xiostatus",
+ 2342: "manage-exec",
+ 2343: "nati-logos",
+ 2344: "fcmsys",
+ 2345: "dbm",
+ 2346: "redstorm-join",
+ 2347: "redstorm-find",
+ 2348: "redstorm-info",
+ 2349: "redstorm-diag",
+ 2350: "psbserver",
+ 2351: "psrserver",
+ 2352: "pslserver",
+ 2353: "pspserver",
+ 2354: "psprserver",
+ 2355: "psdbserver",
+ 2356: "gxtelmd",
+ 2357: "unihub-server",
+ 2358: "futrix",
+ 2359: "flukeserver",
+ 2360: "nexstorindltd",
+ 2361: "tl1",
+ 2362: "digiman",
+ 2363: "mediacntrlnfsd",
+ 2364: "oi-2000",
+ 2365: "dbref",
+ 2366: "qip-login",
+ 2367: "service-ctrl",
+ 2368: "opentable",
+ 2370: "l3-hbmon",
+ 2371: "hp-rda",
+ 2372: "lanmessenger",
+ 2373: "remographlm",
+ 2374: "hydra",
+ 2375: "docker",
+ 2376: "docker-s",
+ 2377: "swarm",
+ 2379: "etcd-client",
+ 2380: "etcd-server",
+ 2381: "compaq-https",
+ 2382: "ms-olap3",
+ 2383: "ms-olap4",
+ 2384: "sd-request",
+ 2385: "sd-data",
+ 2386: "virtualtape",
+ 2387: "vsamredirector",
+ 2388: "mynahautostart",
+ 2389: "ovsessionmgr",
+ 2390: "rsmtp",
+ 2391: "3com-net-mgmt",
+ 2392: "tacticalauth",
+ 2393: "ms-olap1",
+ 2394: "ms-olap2",
+ 2395: "lan900-remote",
+ 2396: "wusage",
+ 2397: "ncl",
+ 2398: "orbiter",
+ 2399: "fmpro-fdal",
+ 2400: "opequus-server",
+ 2401: "cvspserver",
+ 2402: "taskmaster2000",
+ 2403: "taskmaster2000",
+ 2404: "iec-104",
+ 2405: "trc-netpoll",
+ 2406: "jediserver",
+ 2407: "orion",
+ 2408: "railgun-webaccl",
+ 2409: "sns-protocol",
+ 2410: "vrts-registry",
+ 2411: "netwave-ap-mgmt",
+ 2412: "cdn",
+ 2413: "orion-rmi-reg",
+ 2414: "beeyond",
+ 2415: "codima-rtp",
+ 2416: "rmtserver",
+ 2417: "composit-server",
+ 2418: "cas",
+ 2419: "attachmate-s2s",
+ 2420: "dslremote-mgmt",
+ 2421: "g-talk",
+ 2422: "crmsbits",
+ 2423: "rnrp",
+ 2424: "kofax-svr",
+ 2425: "fjitsuappmgr",
+ 2426: "vcmp",
+ 2427: "mgcp-gateway",
+ 2428: "ott",
+ 2429: "ft-role",
+ 2430: "venus",
+ 2431: "venus-se",
+ 2432: "codasrv",
+ 2433: "codasrv-se",
+ 2434: "pxc-epmap",
+ 2435: "optilogic",
+ 2436: "topx",
+ 2437: "unicontrol",
+ 2438: "msp",
+ 2439: "sybasedbsynch",
+ 2440: "spearway",
+ 2441: "pvsw-inet",
+ 2442: "netangel",
+ 2443: "powerclientcsf",
+ 2444: "btpp2sectrans",
+ 2445: "dtn1",
+ 2446: "bues-service",
+ 2447: "ovwdb",
+ 2448: "hpppssvr",
+ 2449: "ratl",
+ 2450: "netadmin",
+ 2451: "netchat",
+ 2452: "snifferclient",
+ 2453: "madge-ltd",
+ 2454: "indx-dds",
+ 2455: "wago-io-system",
+ 2456: "altav-remmgt",
+ 2457: "rapido-ip",
+ 2458: "griffin",
+ 2459: "community",
+ 2460: "ms-theater",
+ 2461: "qadmifoper",
+ 2462: "qadmifevent",
+ 2463: "lsi-raid-mgmt",
+ 2464: "direcpc-si",
+ 2465: "lbm",
+ 2466: "lbf",
+ 2467: "high-criteria",
+ 2468: "qip-msgd",
+ 2469: "mti-tcs-comm",
+ 2470: "taskman-port",
+ 2471: "seaodbc",
+ 2472: "c3",
+ 2473: "aker-cdp",
+ 2474: "vitalanalysis",
+ 2475: "ace-server",
+ 2476: "ace-svr-prop",
+ 2477: "ssm-cvs",
+ 2478: "ssm-cssps",
+ 2479: "ssm-els",
+ 2480: "powerexchange",
+ 2481: "giop",
+ 2482: "giop-ssl",
+ 2483: "ttc",
+ 2484: "ttc-ssl",
+ 2485: "netobjects1",
+ 2486: "netobjects2",
+ 2487: "pns",
+ 2488: "moy-corp",
+ 2489: "tsilb",
+ 2490: "qip-qdhcp",
+ 2491: "conclave-cpp",
+ 2492: "groove",
+ 2493: "talarian-mqs",
+ 2494: "bmc-ar",
+ 2495: "fast-rem-serv",
+ 2496: "dirgis",
+ 2497: "quaddb",
+ 2498: "odn-castraq",
+ 2499: "unicontrol",
+ 2500: "rtsserv",
+ 2501: "rtsclient",
+ 2502: "kentrox-prot",
+ 2503: "nms-dpnss",
+ 2504: "wlbs",
+ 2505: "ppcontrol",
+ 2506: "jbroker",
+ 2507: "spock",
+ 2508: "jdatastore",
+ 2509: "fjmpss",
+ 2510: "fjappmgrbulk",
+ 2511: "metastorm",
+ 2512: "citrixima",
+ 2513: "citrixadmin",
+ 2514: "facsys-ntp",
+ 2515: "facsys-router",
+ 2516: "maincontrol",
+ 2517: "call-sig-trans",
+ 2518: "willy",
+ 2519: "globmsgsvc",
+ 2520: "pvsw",
+ 2521: "adaptecmgr",
+ 2522: "windb",
+ 2523: "qke-llc-v3",
+ 2524: "optiwave-lm",
+ 2525: "ms-v-worlds",
+ 2526: "ema-sent-lm",
+ 2527: "iqserver",
+ 2528: "ncr-ccl",
+ 2529: "utsftp",
+ 2530: "vrcommerce",
+ 2531: "ito-e-gui",
+ 2532: "ovtopmd",
+ 2533: "snifferserver",
+ 2534: "combox-web-acc",
+ 2535: "madcap",
+ 2536: "btpp2audctr1",
+ 2537: "upgrade",
+ 2538: "vnwk-prapi",
+ 2539: "vsiadmin",
+ 2540: "lonworks",
+ 2541: "lonworks2",
+ 2542: "udrawgraph",
+ 2543: "reftek",
+ 2544: "novell-zen",
+ 2545: "sis-emt",
+ 2546: "vytalvaultbrtp",
+ 2547: "vytalvaultvsmp",
+ 2548: "vytalvaultpipe",
+ 2549: "ipass",
+ 2550: "ads",
+ 2551: "isg-uda-server",
+ 2552: "call-logging",
+ 2553: "efidiningport",
+ 2554: "vcnet-link-v10",
+ 2555: "compaq-wcp",
+ 2556: "nicetec-nmsvc",
+ 2557: "nicetec-mgmt",
+ 2558: "pclemultimedia",
+ 2559: "lstp",
+ 2560: "labrat",
+ 2561: "mosaixcc",
+ 2562: "delibo",
+ 2563: "cti-redwood",
+ 2564: "hp-3000-telnet",
+ 2565: "coord-svr",
+ 2566: "pcs-pcw",
+ 2567: "clp",
+ 2568: "spamtrap",
+ 2569: "sonuscallsig",
+ 2570: "hs-port",
+ 2571: "cecsvc",
+ 2572: "ibp",
+ 2573: "trustestablish",
+ 2574: "blockade-bpsp",
+ 2575: "hl7",
+ 2576: "tclprodebugger",
+ 2577: "scipticslsrvr",
+ 2578: "rvs-isdn-dcp",
+ 2579: "mpfoncl",
+ 2580: "tributary",
+ 2581: "argis-te",
+ 2582: "argis-ds",
+ 2583: "mon",
+ 2584: "cyaserv",
+ 2585: "netx-server",
+ 2586: "netx-agent",
+ 2587: "masc",
+ 2588: "privilege",
+ 2589: "quartus-tcl",
+ 2590: "idotdist",
+ 2591: "maytagshuffle",
+ 2592: "netrek",
+ 2593: "mns-mail",
+ 2594: "dts",
+ 2595: "worldfusion1",
+ 2596: "worldfusion2",
+ 2597: "homesteadglory",
+ 2598: "citriximaclient",
+ 2599: "snapd",
+ 2600: "hpstgmgr",
+ 2601: "discp-client",
+ 2602: "discp-server",
+ 2603: "servicemeter",
+ 2604: "nsc-ccs",
+ 2605: "nsc-posa",
+ 2606: "netmon",
+ 2607: "connection",
+ 2608: "wag-service",
+ 2609: "system-monitor",
+ 2610: "versa-tek",
+ 2611: "lionhead",
+ 2612: "qpasa-agent",
+ 2613: "smntubootstrap",
+ 2614: "neveroffline",
+ 2615: "firepower",
+ 2616: "appswitch-emp",
+ 2617: "cmadmin",
+ 2618: "priority-e-com",
+ 2619: "bruce",
+ 2620: "lpsrecommender",
+ 2621: "miles-apart",
+ 2622: "metricadbc",
+ 2623: "lmdp",
+ 2624: "aria",
+ 2625: "blwnkl-port",
+ 2626: "gbjd816",
+ 2627: "moshebeeri",
+ 2628: "dict",
+ 2629: "sitaraserver",
+ 2630: "sitaramgmt",
+ 2631: "sitaradir",
+ 2632: "irdg-post",
+ 2633: "interintelli",
+ 2634: "pk-electronics",
+ 2635: "backburner",
+ 2636: "solve",
+ 2637: "imdocsvc",
+ 2638: "sybaseanywhere",
+ 2639: "aminet",
+ 2640: "ami-control",
+ 2641: "hdl-srv",
+ 2642: "tragic",
+ 2643: "gte-samp",
+ 2644: "travsoft-ipx-t",
+ 2645: "novell-ipx-cmd",
+ 2646: "and-lm",
+ 2647: "syncserver",
+ 2648: "upsnotifyprot",
+ 2649: "vpsipport",
+ 2650: "eristwoguns",
+ 2651: "ebinsite",
+ 2652: "interpathpanel",
+ 2653: "sonus",
+ 2654: "corel-vncadmin",
+ 2655: "unglue",
+ 2656: "kana",
+ 2657: "sns-dispatcher",
+ 2658: "sns-admin",
+ 2659: "sns-query",
+ 2660: "gcmonitor",
+ 2661: "olhost",
+ 2662: "bintec-capi",
+ 2663: "bintec-tapi",
+ 2664: "patrol-mq-gm",
+ 2665: "patrol-mq-nm",
+ 2666: "extensis",
+ 2667: "alarm-clock-s",
+ 2668: "alarm-clock-c",
+ 2669: "toad",
+ 2670: "tve-announce",
+ 2671: "newlixreg",
+ 2672: "nhserver",
+ 2673: "firstcall42",
+ 2674: "ewnn",
+ 2675: "ttc-etap",
+ 2676: "simslink",
+ 2677: "gadgetgate1way",
+ 2678: "gadgetgate2way",
+ 2679: "syncserverssl",
+ 2680: "pxc-sapxom",
+ 2681: "mpnjsomb",
+ 2683: "ncdloadbalance",
+ 2684: "mpnjsosv",
+ 2685: "mpnjsocl",
+ 2686: "mpnjsomg",
+ 2687: "pq-lic-mgmt",
+ 2688: "md-cg-http",
+ 2689: "fastlynx",
+ 2690: "hp-nnm-data",
+ 2691: "itinternet",
+ 2692: "admins-lms",
+ 2694: "pwrsevent",
+ 2695: "vspread",
+ 2696: "unifyadmin",
+ 2697: "oce-snmp-trap",
+ 2698: "mck-ivpip",
+ 2699: "csoft-plusclnt",
+ 2700: "tqdata",
+ 2701: "sms-rcinfo",
+ 2702: "sms-xfer",
+ 2703: "sms-chat",
+ 2704: "sms-remctrl",
+ 2705: "sds-admin",
+ 2706: "ncdmirroring",
+ 2707: "emcsymapiport",
+ 2708: "banyan-net",
+ 2709: "supermon",
+ 2710: "sso-service",
+ 2711: "sso-control",
+ 2712: "aocp",
+ 2713: "raventbs",
+ 2714: "raventdm",
+ 2715: "hpstgmgr2",
+ 2716: "inova-ip-disco",
+ 2717: "pn-requester",
+ 2718: "pn-requester2",
+ 2719: "scan-change",
+ 2720: "wkars",
+ 2721: "smart-diagnose",
+ 2722: "proactivesrvr",
+ 2723: "watchdog-nt",
+ 2724: "qotps",
+ 2725: "msolap-ptp2",
+ 2726: "tams",
+ 2727: "mgcp-callagent",
+ 2728: "sqdr",
+ 2729: "tcim-control",
+ 2730: "nec-raidplus",
+ 2731: "fyre-messanger",
+ 2732: "g5m",
+ 2733: "signet-ctf",
+ 2734: "ccs-software",
+ 2735: "netiq-mc",
+ 2736: "radwiz-nms-srv",
+ 2737: "srp-feedback",
+ 2738: "ndl-tcp-ois-gw",
+ 2739: "tn-timing",
+ 2740: "alarm",
+ 2741: "tsb",
+ 2742: "tsb2",
+ 2743: "murx",
+ 2744: "honyaku",
+ 2745: "urbisnet",
+ 2746: "cpudpencap",
+ 2747: "fjippol-swrly",
+ 2748: "fjippol-polsvr",
+ 2749: "fjippol-cnsl",
+ 2750: "fjippol-port1",
+ 2751: "fjippol-port2",
+ 2752: "rsisysaccess",
+ 2753: "de-spot",
+ 2754: "apollo-cc",
+ 2755: "expresspay",
+ 2756: "simplement-tie",
+ 2757: "cnrp",
+ 2758: "apollo-status",
+ 2759: "apollo-gms",
+ 2760: "sabams",
+ 2761: "dicom-iscl",
+ 2762: "dicom-tls",
+ 2763: "desktop-dna",
+ 2764: "data-insurance",
+ 2765: "qip-audup",
+ 2766: "compaq-scp",
+ 2767: "uadtc",
+ 2768: "uacs",
+ 2769: "exce",
+ 2770: "veronica",
+ 2771: "vergencecm",
+ 2772: "auris",
+ 2773: "rbakcup1",
+ 2774: "rbakcup2",
+ 2775: "smpp",
+ 2776: "ridgeway1",
+ 2777: "ridgeway2",
+ 2778: "gwen-sonya",
+ 2779: "lbc-sync",
+ 2780: "lbc-control",
+ 2781: "whosells",
+ 2782: "everydayrc",
+ 2783: "aises",
+ 2784: "www-dev",
+ 2785: "aic-np",
+ 2786: "aic-oncrpc",
+ 2787: "piccolo",
+ 2788: "fryeserv",
+ 2789: "media-agent",
+ 2790: "plgproxy",
+ 2791: "mtport-regist",
+ 2792: "f5-globalsite",
+ 2793: "initlsmsad",
+ 2795: "livestats",
+ 2796: "ac-tech",
+ 2797: "esp-encap",
+ 2798: "tmesis-upshot",
+ 2799: "icon-discover",
+ 2800: "acc-raid",
+ 2801: "igcp",
+ 2802: "veritas-tcp1",
+ 2803: "btprjctrl",
+ 2804: "dvr-esm",
+ 2805: "wta-wsp-s",
+ 2806: "cspuni",
+ 2807: "cspmulti",
+ 2808: "j-lan-p",
+ 2809: "corbaloc",
+ 2810: "netsteward",
+ 2811: "gsiftp",
+ 2812: "atmtcp",
+ 2813: "llm-pass",
+ 2814: "llm-csv",
+ 2815: "lbc-measure",
+ 2816: "lbc-watchdog",
+ 2817: "nmsigport",
+ 2818: "rmlnk",
+ 2819: "fc-faultnotify",
+ 2820: "univision",
+ 2821: "vrts-at-port",
+ 2822: "ka0wuc",
+ 2823: "cqg-netlan",
+ 2824: "cqg-netlan-1",
+ 2826: "slc-systemlog",
+ 2827: "slc-ctrlrloops",
+ 2828: "itm-lm",
+ 2829: "silkp1",
+ 2830: "silkp2",
+ 2831: "silkp3",
+ 2832: "silkp4",
+ 2833: "glishd",
+ 2834: "evtp",
+ 2835: "evtp-data",
+ 2836: "catalyst",
+ 2837: "repliweb",
+ 2838: "starbot",
+ 2839: "nmsigport",
+ 2840: "l3-exprt",
+ 2841: "l3-ranger",
+ 2842: "l3-hawk",
+ 2843: "pdnet",
+ 2844: "bpcp-poll",
+ 2845: "bpcp-trap",
+ 2846: "aimpp-hello",
+ 2847: "aimpp-port-req",
+ 2848: "amt-blc-port",
+ 2849: "fxp",
+ 2850: "metaconsole",
+ 2851: "webemshttp",
+ 2852: "bears-01",
+ 2853: "ispipes",
+ 2854: "infomover",
+ 2855: "msrp",
+ 2856: "cesdinv",
+ 2857: "simctlp",
+ 2858: "ecnp",
+ 2859: "activememory",
+ 2860: "dialpad-voice1",
+ 2861: "dialpad-voice2",
+ 2862: "ttg-protocol",
+ 2863: "sonardata",
+ 2864: "astromed-main",
+ 2865: "pit-vpn",
+ 2866: "iwlistener",
+ 2867: "esps-portal",
+ 2868: "npep-messaging",
+ 2869: "icslap",
+ 2870: "daishi",
+ 2871: "msi-selectplay",
+ 2872: "radix",
+ 2874: "dxmessagebase1",
+ 2875: "dxmessagebase2",
+ 2876: "sps-tunnel",
+ 2877: "bluelance",
+ 2878: "aap",
+ 2879: "ucentric-ds",
+ 2880: "synapse",
+ 2881: "ndsp",
+ 2882: "ndtp",
+ 2883: "ndnp",
+ 2884: "flashmsg",
+ 2885: "topflow",
+ 2886: "responselogic",
+ 2887: "aironetddp",
+ 2888: "spcsdlobby",
+ 2889: "rsom",
+ 2890: "cspclmulti",
+ 2891: "cinegrfx-elmd",
+ 2892: "snifferdata",
+ 2893: "vseconnector",
+ 2894: "abacus-remote",
+ 2895: "natuslink",
+ 2896: "ecovisiong6-1",
+ 2897: "citrix-rtmp",
+ 2898: "appliance-cfg",
+ 2899: "powergemplus",
+ 2900: "quicksuite",
+ 2901: "allstorcns",
+ 2902: "netaspi",
+ 2903: "suitcase",
+ 2904: "m2ua",
+ 2905: "m3ua",
+ 2906: "caller9",
+ 2907: "webmethods-b2b",
+ 2908: "mao",
+ 2909: "funk-dialout",
+ 2910: "tdaccess",
+ 2911: "blockade",
+ 2912: "epicon",
+ 2913: "boosterware",
+ 2914: "gamelobby",
+ 2915: "tksocket",
+ 2916: "elvin-server",
+ 2917: "elvin-client",
+ 2918: "kastenchasepad",
+ 2919: "roboer",
+ 2920: "roboeda",
+ 2921: "cesdcdman",
+ 2922: "cesdcdtrn",
+ 2923: "wta-wsp-wtp-s",
+ 2924: "precise-vip",
+ 2926: "mobile-file-dl",
+ 2927: "unimobilectrl",
+ 2928: "redstone-cpss",
+ 2929: "amx-webadmin",
+ 2930: "amx-weblinx",
+ 2931: "circle-x",
+ 2932: "incp",
+ 2933: "4-tieropmgw",
+ 2934: "4-tieropmcli",
+ 2935: "qtp",
+ 2936: "otpatch",
+ 2937: "pnaconsult-lm",
+ 2938: "sm-pas-1",
+ 2939: "sm-pas-2",
+ 2940: "sm-pas-3",
+ 2941: "sm-pas-4",
+ 2942: "sm-pas-5",
+ 2943: "ttnrepository",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 2946: "fjsvmpor",
+ 2947: "gpsd",
+ 2948: "wap-push",
+ 2949: "wap-pushsecure",
+ 2950: "esip",
+ 2951: "ottp",
+ 2952: "mpfwsas",
+ 2953: "ovalarmsrv",
+ 2954: "ovalarmsrv-cmd",
+ 2955: "csnotify",
+ 2956: "ovrimosdbman",
+ 2957: "jmact5",
+ 2958: "jmact6",
+ 2959: "rmopagt",
+ 2960: "dfoxserver",
+ 2961: "boldsoft-lm",
+ 2962: "iph-policy-cli",
+ 2963: "iph-policy-adm",
+ 2964: "bullant-srap",
+ 2965: "bullant-rap",
+ 2966: "idp-infotrieve",
+ 2967: "ssc-agent",
+ 2968: "enpp",
+ 2969: "essp",
+ 2970: "index-net",
+ 2971: "netclip",
+ 2972: "pmsm-webrctl",
+ 2973: "svnetworks",
+ 2974: "signal",
+ 2975: "fjmpcm",
+ 2976: "cns-srv-port",
+ 2977: "ttc-etap-ns",
+ 2978: "ttc-etap-ds",
+ 2979: "h263-video",
+ 2980: "wimd",
+ 2981: "mylxamport",
+ 2982: "iwb-whiteboard",
+ 2983: "netplan",
+ 2984: "hpidsadmin",
+ 2985: "hpidsagent",
+ 2986: "stonefalls",
+ 2987: "identify",
+ 2988: "hippad",
+ 2989: "zarkov",
+ 2990: "boscap",
+ 2991: "wkstn-mon",
+ 2992: "avenyo",
+ 2993: "veritas-vis1",
+ 2994: "veritas-vis2",
+ 2995: "idrs",
+ 2996: "vsixml",
+ 2997: "rebol",
+ 2998: "realsecure",
+ 2999: "remoteware-un",
+ 3000: "hbci",
+ 3001: "origo-native",
+ 3002: "exlm-agent",
+ 3003: "cgms",
+ 3004: "csoftragent",
+ 3005: "geniuslm",
+ 3006: "ii-admin",
+ 3007: "lotusmtap",
+ 3008: "midnight-tech",
+ 3009: "pxc-ntfy",
+ 3010: "gw",
+ 3011: "trusted-web",
+ 3012: "twsdss",
+ 3013: "gilatskysurfer",
+ 3014: "broker-service",
+ 3015: "nati-dstp",
+ 3016: "notify-srvr",
+ 3017: "event-listener",
+ 3018: "srvc-registry",
+ 3019: "resource-mgr",
+ 3020: "cifs",
+ 3021: "agriserver",
+ 3022: "csregagent",
+ 3023: "magicnotes",
+ 3024: "nds-sso",
+ 3025: "arepa-raft",
+ 3026: "agri-gateway",
+ 3027: "LiebDevMgmt-C",
+ 3028: "LiebDevMgmt-DM",
+ 3029: "LiebDevMgmt-A",
+ 3030: "arepa-cas",
+ 3031: "eppc",
+ 3032: "redwood-chat",
+ 3033: "pdb",
+ 3034: "osmosis-aeea",
+ 3035: "fjsv-gssagt",
+ 3036: "hagel-dump",
+ 3037: "hp-san-mgmt",
+ 3038: "santak-ups",
+ 3039: "cogitate",
+ 3040: "tomato-springs",
+ 3041: "di-traceware",
+ 3042: "journee",
+ 3043: "brp",
+ 3044: "epp",
+ 3045: "responsenet",
+ 3046: "di-ase",
+ 3047: "hlserver",
+ 3048: "pctrader",
+ 3049: "nsws",
+ 3050: "gds-db",
+ 3051: "galaxy-server",
+ 3052: "apc-3052",
+ 3053: "dsom-server",
+ 3054: "amt-cnf-prot",
+ 3055: "policyserver",
+ 3056: "cdl-server",
+ 3057: "goahead-fldup",
+ 3058: "videobeans",
+ 3059: "qsoft",
+ 3060: "interserver",
+ 3061: "cautcpd",
+ 3062: "ncacn-ip-tcp",
+ 3063: "ncadg-ip-udp",
+ 3064: "rprt",
+ 3065: "slinterbase",
+ 3066: "netattachsdmp",
+ 3067: "fjhpjp",
+ 3068: "ls3bcast",
+ 3069: "ls3",
+ 3070: "mgxswitch",
+ 3071: "xplat-replicate",
+ 3072: "csd-monitor",
+ 3073: "vcrp",
+ 3074: "xbox",
+ 3075: "orbix-locator",
+ 3076: "orbix-config",
+ 3077: "orbix-loc-ssl",
+ 3078: "orbix-cfg-ssl",
+ 3079: "lv-frontpanel",
+ 3080: "stm-pproc",
+ 3081: "tl1-lv",
+ 3082: "tl1-raw",
+ 3083: "tl1-telnet",
+ 3084: "itm-mccs",
+ 3085: "pcihreq",
+ 3086: "jdl-dbkitchen",
+ 3087: "asoki-sma",
+ 3088: "xdtp",
+ 3089: "ptk-alink",
+ 3090: "stss",
+ 3091: "1ci-smcs",
+ 3093: "rapidmq-center",
+ 3094: "rapidmq-reg",
+ 3095: "panasas",
+ 3096: "ndl-aps",
+ 3098: "umm-port",
+ 3099: "chmd",
+ 3100: "opcon-xps",
+ 3101: "hp-pxpib",
+ 3102: "slslavemon",
+ 3103: "autocuesmi",
+ 3104: "autocuelog",
+ 3105: "cardbox",
+ 3106: "cardbox-http",
+ 3107: "business",
+ 3108: "geolocate",
+ 3109: "personnel",
+ 3110: "sim-control",
+ 3111: "wsynch",
+ 3112: "ksysguard",
+ 3113: "cs-auth-svr",
+ 3114: "ccmad",
+ 3115: "mctet-master",
+ 3116: "mctet-gateway",
+ 3117: "mctet-jserv",
+ 3118: "pkagent",
+ 3119: "d2000kernel",
+ 3120: "d2000webserver",
+ 3121: "pcmk-remote",
+ 3122: "vtr-emulator",
+ 3123: "edix",
+ 3124: "beacon-port",
+ 3125: "a13-an",
+ 3127: "ctx-bridge",
+ 3128: "ndl-aas",
+ 3129: "netport-id",
+ 3130: "icpv2",
+ 3131: "netbookmark",
+ 3132: "ms-rule-engine",
+ 3133: "prism-deploy",
+ 3134: "ecp",
+ 3135: "peerbook-port",
+ 3136: "grubd",
+ 3137: "rtnt-1",
+ 3138: "rtnt-2",
+ 3139: "incognitorv",
+ 3140: "ariliamulti",
+ 3141: "vmodem",
+ 3142: "rdc-wh-eos",
+ 3143: "seaview",
+ 3144: "tarantella",
+ 3145: "csi-lfap",
+ 3146: "bears-02",
+ 3147: "rfio",
+ 3148: "nm-game-admin",
+ 3149: "nm-game-server",
+ 3150: "nm-asses-admin",
+ 3151: "nm-assessor",
+ 3152: "feitianrockey",
+ 3153: "s8-client-port",
+ 3154: "ccmrmi",
+ 3155: "jpegmpeg",
+ 3156: "indura",
+ 3157: "e3consultants",
+ 3158: "stvp",
+ 3159: "navegaweb-port",
+ 3160: "tip-app-server",
+ 3161: "doc1lm",
+ 3162: "sflm",
+ 3163: "res-sap",
+ 3164: "imprs",
+ 3165: "newgenpay",
+ 3166: "sossecollector",
+ 3167: "nowcontact",
+ 3168: "poweronnud",
+ 3169: "serverview-as",
+ 3170: "serverview-asn",
+ 3171: "serverview-gf",
+ 3172: "serverview-rm",
+ 3173: "serverview-icc",
+ 3174: "armi-server",
+ 3175: "t1-e1-over-ip",
+ 3176: "ars-master",
+ 3177: "phonex-port",
+ 3178: "radclientport",
+ 3179: "h2gf-w-2m",
+ 3180: "mc-brk-srv",
+ 3181: "bmcpatrolagent",
+ 3182: "bmcpatrolrnvu",
+ 3183: "cops-tls",
+ 3184: "apogeex-port",
+ 3185: "smpppd",
+ 3186: "iiw-port",
+ 3187: "odi-port",
+ 3188: "brcm-comm-port",
+ 3189: "pcle-infex",
+ 3190: "csvr-proxy",
+ 3191: "csvr-sslproxy",
+ 3192: "firemonrcc",
+ 3193: "spandataport",
+ 3194: "magbind",
+ 3195: "ncu-1",
+ 3196: "ncu-2",
+ 3197: "embrace-dp-s",
+ 3198: "embrace-dp-c",
+ 3199: "dmod-workspace",
+ 3200: "tick-port",
+ 3201: "cpq-tasksmart",
+ 3202: "intraintra",
+ 3203: "netwatcher-mon",
+ 3204: "netwatcher-db",
+ 3205: "isns",
+ 3206: "ironmail",
+ 3207: "vx-auth-port",
+ 3208: "pfu-prcallback",
+ 3209: "netwkpathengine",
+ 3210: "flamenco-proxy",
+ 3211: "avsecuremgmt",
+ 3212: "surveyinst",
+ 3213: "neon24x7",
+ 3214: "jmq-daemon-1",
+ 3215: "jmq-daemon-2",
+ 3216: "ferrari-foam",
+ 3217: "unite",
+ 3218: "smartpackets",
+ 3219: "wms-messenger",
+ 3220: "xnm-ssl",
+ 3221: "xnm-clear-text",
+ 3222: "glbp",
+ 3223: "digivote",
+ 3224: "aes-discovery",
+ 3225: "fcip-port",
+ 3226: "isi-irp",
+ 3227: "dwnmshttp",
+ 3228: "dwmsgserver",
+ 3229: "global-cd-port",
+ 3230: "sftdst-port",
+ 3231: "vidigo",
+ 3232: "mdtp",
+ 3233: "whisker",
+ 3234: "alchemy",
+ 3235: "mdap-port",
+ 3236: "apparenet-ts",
+ 3237: "apparenet-tps",
+ 3238: "apparenet-as",
+ 3239: "apparenet-ui",
+ 3240: "triomotion",
+ 3241: "sysorb",
+ 3242: "sdp-id-port",
+ 3243: "timelot",
+ 3244: "onesaf",
+ 3245: "vieo-fe",
+ 3246: "dvt-system",
+ 3247: "dvt-data",
+ 3248: "procos-lm",
+ 3249: "ssp",
+ 3250: "hicp",
+ 3251: "sysscanner",
+ 3252: "dhe",
+ 3253: "pda-data",
+ 3254: "pda-sys",
+ 3255: "semaphore",
+ 3256: "cpqrpm-agent",
+ 3257: "cpqrpm-server",
+ 3258: "ivecon-port",
+ 3259: "epncdp2",
+ 3260: "iscsi-target",
+ 3261: "winshadow",
+ 3262: "necp",
+ 3263: "ecolor-imager",
+ 3264: "ccmail",
+ 3265: "altav-tunnel",
+ 3266: "ns-cfg-server",
+ 3267: "ibm-dial-out",
+ 3268: "msft-gc",
+ 3269: "msft-gc-ssl",
+ 3270: "verismart",
+ 3271: "csoft-prev",
+ 3272: "user-manager",
+ 3273: "sxmp",
+ 3274: "ordinox-server",
+ 3275: "samd",
+ 3276: "maxim-asics",
+ 3277: "awg-proxy",
+ 3278: "lkcmserver",
+ 3279: "admind",
+ 3280: "vs-server",
+ 3281: "sysopt",
+ 3282: "datusorb",
+ 3283: "Apple Remote Desktop (Net Assistant)",
+ 3284: "4talk",
+ 3285: "plato",
+ 3286: "e-net",
+ 3287: "directvdata",
+ 3288: "cops",
+ 3289: "enpc",
+ 3290: "caps-lm",
+ 3291: "sah-lm",
+ 3292: "cart-o-rama",
+ 3293: "fg-fps",
+ 3294: "fg-gip",
+ 3295: "dyniplookup",
+ 3296: "rib-slm",
+ 3297: "cytel-lm",
+ 3298: "deskview",
+ 3299: "pdrncs",
+ 3300: "ceph",
+ 3302: "mcs-fastmail",
+ 3303: "opsession-clnt",
+ 3304: "opsession-srvr",
+ 3305: "odette-ftp",
+ 3306: "mysql",
+ 3307: "opsession-prxy",
+ 3308: "tns-server",
+ 3309: "tns-adv",
+ 3310: "dyna-access",
+ 3311: "mcns-tel-ret",
+ 3312: "appman-server",
+ 3313: "uorb",
+ 3314: "uohost",
+ 3315: "cdid",
+ 3316: "aicc-cmi",
+ 3317: "vsaiport",
+ 3318: "ssrip",
+ 3319: "sdt-lmd",
+ 3320: "officelink2000",
+ 3321: "vnsstr",
+ 3326: "sftu",
+ 3327: "bbars",
+ 3328: "egptlm",
+ 3329: "hp-device-disc",
+ 3330: "mcs-calypsoicf",
+ 3331: "mcs-messaging",
+ 3332: "mcs-mailsvr",
+ 3333: "dec-notes",
+ 3334: "directv-web",
+ 3335: "directv-soft",
+ 3336: "directv-tick",
+ 3337: "directv-catlg",
+ 3338: "anet-b",
+ 3339: "anet-l",
+ 3340: "anet-m",
+ 3341: "anet-h",
+ 3342: "webtie",
+ 3343: "ms-cluster-net",
+ 3344: "bnt-manager",
+ 3345: "influence",
+ 3346: "trnsprntproxy",
+ 3347: "phoenix-rpc",
+ 3348: "pangolin-laser",
+ 3349: "chevinservices",
+ 3350: "findviatv",
+ 3351: "btrieve",
+ 3352: "ssql",
+ 3353: "fatpipe",
+ 3354: "suitjd",
+ 3355: "ordinox-dbase",
+ 3356: "upnotifyps",
+ 3357: "adtech-test",
+ 3358: "mpsysrmsvr",
+ 3359: "wg-netforce",
+ 3360: "kv-server",
+ 3361: "kv-agent",
+ 3362: "dj-ilm",
+ 3363: "nati-vi-server",
+ 3364: "creativeserver",
+ 3365: "contentserver",
+ 3366: "creativepartnr",
+ 3372: "tip2",
+ 3373: "lavenir-lm",
+ 3374: "cluster-disc",
+ 3375: "vsnm-agent",
+ 3376: "cdbroker",
+ 3377: "cogsys-lm",
+ 3378: "wsicopy",
+ 3379: "socorfs",
+ 3380: "sns-channels",
+ 3381: "geneous",
+ 3382: "fujitsu-neat",
+ 3383: "esp-lm",
+ 3384: "hp-clic",
+ 3385: "qnxnetman",
+ 3386: "gprs-data",
+ 3387: "backroomnet",
+ 3388: "cbserver",
+ 3389: "ms-wbt-server",
+ 3390: "dsc",
+ 3391: "savant",
+ 3392: "efi-lm",
+ 3393: "d2k-tapestry1",
+ 3394: "d2k-tapestry2",
+ 3395: "dyna-lm",
+ 3396: "printer-agent",
+ 3397: "cloanto-lm",
+ 3398: "mercantile",
+ 3399: "csms",
+ 3400: "csms2",
+ 3401: "filecast",
+ 3402: "fxaengine-net",
+ 3405: "nokia-ann-ch1",
+ 3406: "nokia-ann-ch2",
+ 3407: "ldap-admin",
+ 3408: "BESApi",
+ 3409: "networklens",
+ 3410: "networklenss",
+ 3411: "biolink-auth",
+ 3412: "xmlblaster",
+ 3413: "svnet",
+ 3414: "wip-port",
+ 3415: "bcinameservice",
+ 3416: "commandport",
+ 3417: "csvr",
+ 3418: "rnmap",
+ 3419: "softaudit",
+ 3420: "ifcp-port",
+ 3421: "bmap",
+ 3422: "rusb-sys-port",
+ 3423: "xtrm",
+ 3424: "xtrms",
+ 3425: "agps-port",
+ 3426: "arkivio",
+ 3427: "websphere-snmp",
+ 3428: "twcss",
+ 3429: "gcsp",
+ 3430: "ssdispatch",
+ 3431: "ndl-als",
+ 3432: "osdcp",
+ 3433: "opnet-smp",
+ 3434: "opencm",
+ 3435: "pacom",
+ 3436: "gc-config",
+ 3437: "autocueds",
+ 3438: "spiral-admin",
+ 3439: "hri-port",
+ 3440: "ans-console",
+ 3441: "connect-client",
+ 3442: "connect-server",
+ 3443: "ov-nnm-websrv",
+ 3444: "denali-server",
+ 3445: "monp",
+ 3446: "3comfaxrpc",
+ 3447: "directnet",
+ 3448: "dnc-port",
+ 3449: "hotu-chat",
+ 3450: "castorproxy",
+ 3451: "asam",
+ 3452: "sabp-signal",
+ 3453: "pscupd",
+ 3454: "mira",
+ 3455: "prsvp",
+ 3456: "vat",
+ 3457: "vat-control",
+ 3458: "d3winosfi",
+ 3459: "integral",
+ 3460: "edm-manager",
+ 3461: "edm-stager",
+ 3462: "edm-std-notify",
+ 3463: "edm-adm-notify",
+ 3464: "edm-mgr-sync",
+ 3465: "edm-mgr-cntrl",
+ 3466: "workflow",
+ 3467: "rcst",
+ 3468: "ttcmremotectrl",
+ 3469: "pluribus",
+ 3470: "jt400",
+ 3471: "jt400-ssl",
+ 3472: "jaugsremotec-1",
+ 3473: "jaugsremotec-2",
+ 3474: "ttntspauto",
+ 3475: "genisar-port",
+ 3476: "nppmp",
+ 3477: "ecomm",
+ 3478: "stun",
+ 3479: "twrpc",
+ 3480: "plethora",
+ 3481: "cleanerliverc",
+ 3482: "vulture",
+ 3483: "slim-devices",
+ 3484: "gbs-stp",
+ 3485: "celatalk",
+ 3486: "ifsf-hb-port",
+ 3487: "ltctcp",
+ 3488: "fs-rh-srv",
+ 3489: "dtp-dia",
+ 3490: "colubris",
+ 3491: "swr-port",
+ 3492: "tvdumtray-port",
+ 3493: "nut",
+ 3494: "ibm3494",
+ 3495: "seclayer-tcp",
+ 3496: "seclayer-tls",
+ 3497: "ipether232port",
+ 3498: "dashpas-port",
+ 3499: "sccip-media",
+ 3500: "rtmp-port",
+ 3501: "isoft-p2p",
+ 3502: "avinstalldisc",
+ 3503: "lsp-ping",
+ 3504: "ironstorm",
+ 3505: "ccmcomm",
+ 3506: "apc-3506",
+ 3507: "nesh-broker",
+ 3508: "interactionweb",
+ 3509: "vt-ssl",
+ 3510: "xss-port",
+ 3511: "webmail-2",
+ 3512: "aztec",
+ 3513: "arcpd",
+ 3514: "must-p2p",
+ 3515: "must-backplane",
+ 3516: "smartcard-port",
+ 3517: "802-11-iapp",
+ 3518: "artifact-msg",
+ 3519: "nvmsgd",
+ 3520: "galileolog",
+ 3521: "mc3ss",
+ 3522: "nssocketport",
+ 3523: "odeumservlink",
+ 3524: "ecmport",
+ 3525: "eisport",
+ 3526: "starquiz-port",
+ 3527: "beserver-msg-q",
+ 3528: "jboss-iiop",
+ 3529: "jboss-iiop-ssl",
+ 3530: "gf",
+ 3531: "joltid",
+ 3532: "raven-rmp",
+ 3533: "raven-rdp",
+ 3534: "urld-port",
+ 3535: "ms-la",
+ 3536: "snac",
+ 3537: "ni-visa-remote",
+ 3538: "ibm-diradm",
+ 3539: "ibm-diradm-ssl",
+ 3540: "pnrp-port",
+ 3541: "voispeed-port",
+ 3542: "hacl-monitor",
+ 3543: "qftest-lookup",
+ 3544: "teredo",
+ 3545: "camac",
+ 3547: "symantec-sim",
+ 3548: "interworld",
+ 3549: "tellumat-nms",
+ 3550: "ssmpp",
+ 3551: "apcupsd",
+ 3552: "taserver",
+ 3553: "rbr-discovery",
+ 3554: "questnotify",
+ 3555: "razor",
+ 3556: "sky-transport",
+ 3557: "personalos-001",
+ 3558: "mcp-port",
+ 3559: "cctv-port",
+ 3560: "iniserve-port",
+ 3561: "bmc-onekey",
+ 3562: "sdbproxy",
+ 3563: "watcomdebug",
+ 3564: "esimport",
+ 3565: "m2pa",
+ 3566: "quest-data-hub",
+ 3567: "dof-eps",
+ 3568: "dof-tunnel-sec",
+ 3569: "mbg-ctrl",
+ 3570: "mccwebsvr-port",
+ 3571: "megardsvr-port",
+ 3572: "megaregsvrport",
+ 3573: "tag-ups-1",
+ 3574: "dmaf-server",
+ 3575: "ccm-port",
+ 3576: "cmc-port",
+ 3577: "config-port",
+ 3578: "data-port",
+ 3579: "ttat3lb",
+ 3580: "nati-svrloc",
+ 3581: "kfxaclicensing",
+ 3582: "press",
+ 3583: "canex-watch",
+ 3584: "u-dbap",
+ 3585: "emprise-lls",
+ 3586: "emprise-lsc",
+ 3587: "p2pgroup",
+ 3588: "sentinel",
+ 3589: "isomair",
+ 3590: "wv-csp-sms",
+ 3591: "gtrack-server",
+ 3592: "gtrack-ne",
+ 3593: "bpmd",
+ 3594: "mediaspace",
+ 3595: "shareapp",
+ 3596: "iw-mmogame",
+ 3597: "a14",
+ 3598: "a15",
+ 3599: "quasar-server",
+ 3600: "trap-daemon",
+ 3601: "visinet-gui",
+ 3602: "infiniswitchcl",
+ 3603: "int-rcv-cntrl",
+ 3604: "bmc-jmx-port",
+ 3605: "comcam-io",
+ 3606: "splitlock",
+ 3607: "precise-i3",
+ 3608: "trendchip-dcp",
+ 3609: "cpdi-pidas-cm",
+ 3610: "echonet",
+ 3611: "six-degrees",
+ 3612: "hp-dataprotect",
+ 3613: "alaris-disc",
+ 3614: "sigma-port",
+ 3615: "start-network",
+ 3616: "cd3o-protocol",
+ 3617: "sharp-server",
+ 3618: "aairnet-1",
+ 3619: "aairnet-2",
+ 3620: "ep-pcp",
+ 3621: "ep-nsp",
+ 3622: "ff-lr-port",
+ 3623: "haipe-discover",
+ 3624: "dist-upgrade",
+ 3625: "volley",
+ 3626: "bvcdaemon-port",
+ 3627: "jamserverport",
+ 3628: "ept-machine",
+ 3629: "escvpnet",
+ 3630: "cs-remote-db",
+ 3631: "cs-services",
+ 3632: "distcc",
+ 3633: "wacp",
+ 3634: "hlibmgr",
+ 3635: "sdo",
+ 3636: "servistaitsm",
+ 3637: "scservp",
+ 3638: "ehp-backup",
+ 3639: "xap-ha",
+ 3640: "netplay-port1",
+ 3641: "netplay-port2",
+ 3642: "juxml-port",
+ 3643: "audiojuggler",
+ 3644: "ssowatch",
+ 3645: "cyc",
+ 3646: "xss-srv-port",
+ 3647: "splitlock-gw",
+ 3648: "fjcp",
+ 3649: "nmmp",
+ 3650: "prismiq-plugin",
+ 3651: "xrpc-registry",
+ 3652: "vxcrnbuport",
+ 3653: "tsp",
+ 3654: "vaprtm",
+ 3655: "abatemgr",
+ 3656: "abatjss",
+ 3657: "immedianet-bcn",
+ 3658: "ps-ams",
+ 3659: "apple-sasl",
+ 3660: "can-nds-ssl",
+ 3661: "can-ferret-ssl",
+ 3662: "pserver",
+ 3663: "dtp",
+ 3664: "ups-engine",
+ 3665: "ent-engine",
+ 3666: "eserver-pap",
+ 3667: "infoexch",
+ 3668: "dell-rm-port",
+ 3669: "casanswmgmt",
+ 3670: "smile",
+ 3671: "efcp",
+ 3672: "lispworks-orb",
+ 3673: "mediavault-gui",
+ 3674: "wininstall-ipc",
+ 3675: "calltrax",
+ 3676: "va-pacbase",
+ 3677: "roverlog",
+ 3678: "ipr-dglt",
+ 3679: "Escale (Newton Dock)",
+ 3680: "npds-tracker",
+ 3681: "bts-x73",
+ 3682: "cas-mapi",
+ 3683: "bmc-ea",
+ 3684: "faxstfx-port",
+ 3685: "dsx-agent",
+ 3686: "tnmpv2",
+ 3687: "simple-push",
+ 3688: "simple-push-s",
+ 3689: "daap",
+ 3690: "svn",
+ 3691: "magaya-network",
+ 3692: "intelsync",
+ 3693: "easl",
+ 3695: "bmc-data-coll",
+ 3696: "telnetcpcd",
+ 3697: "nw-license",
+ 3698: "sagectlpanel",
+ 3699: "kpn-icw",
+ 3700: "lrs-paging",
+ 3701: "netcelera",
+ 3702: "ws-discovery",
+ 3703: "adobeserver-3",
+ 3704: "adobeserver-4",
+ 3705: "adobeserver-5",
+ 3706: "rt-event",
+ 3707: "rt-event-s",
+ 3708: "sun-as-iiops",
+ 3709: "ca-idms",
+ 3710: "portgate-auth",
+ 3711: "edb-server2",
+ 3712: "sentinel-ent",
+ 3713: "tftps",
+ 3714: "delos-dms",
+ 3715: "anoto-rendezv",
+ 3716: "wv-csp-sms-cir",
+ 3717: "wv-csp-udp-cir",
+ 3718: "opus-services",
+ 3719: "itelserverport",
+ 3720: "ufastro-instr",
+ 3721: "xsync",
+ 3722: "xserveraid",
+ 3723: "sychrond",
+ 3724: "blizwow",
+ 3725: "na-er-tip",
+ 3726: "array-manager",
+ 3727: "e-mdu",
+ 3728: "e-woa",
+ 3729: "fksp-audit",
+ 3730: "client-ctrl",
+ 3731: "smap",
+ 3732: "m-wnn",
+ 3733: "multip-msg",
+ 3734: "synel-data",
+ 3735: "pwdis",
+ 3736: "rs-rmi",
+ 3737: "xpanel",
+ 3738: "versatalk",
+ 3739: "launchbird-lm",
+ 3740: "heartbeat",
+ 3741: "wysdma",
+ 3742: "cst-port",
+ 3743: "ipcs-command",
+ 3744: "sasg",
+ 3745: "gw-call-port",
+ 3746: "linktest",
+ 3747: "linktest-s",
+ 3748: "webdata",
+ 3749: "cimtrak",
+ 3750: "cbos-ip-port",
+ 3751: "gprs-cube",
+ 3752: "vipremoteagent",
+ 3753: "nattyserver",
+ 3754: "timestenbroker",
+ 3755: "sas-remote-hlp",
+ 3756: "canon-capt",
+ 3757: "grf-port",
+ 3758: "apw-registry",
+ 3759: "exapt-lmgr",
+ 3760: "adtempusclient",
+ 3761: "gsakmp",
+ 3762: "gbs-smp",
+ 3763: "xo-wave",
+ 3764: "mni-prot-rout",
+ 3765: "rtraceroute",
+ 3766: "sitewatch-s",
+ 3767: "listmgr-port",
+ 3768: "rblcheckd",
+ 3769: "haipe-otnk",
+ 3770: "cindycollab",
+ 3771: "paging-port",
+ 3772: "ctp",
+ 3773: "ctdhercules",
+ 3774: "zicom",
+ 3775: "ispmmgr",
+ 3776: "dvcprov-port",
+ 3777: "jibe-eb",
+ 3778: "c-h-it-port",
+ 3779: "cognima",
+ 3780: "nnp",
+ 3781: "abcvoice-port",
+ 3782: "iso-tp0s",
+ 3783: "bim-pem",
+ 3784: "bfd-control",
+ 3785: "bfd-echo",
+ 3786: "upstriggervsw",
+ 3787: "fintrx",
+ 3788: "isrp-port",
+ 3789: "remotedeploy",
+ 3790: "quickbooksrds",
+ 3791: "tvnetworkvideo",
+ 3792: "sitewatch",
+ 3793: "dcsoftware",
+ 3794: "jaus",
+ 3795: "myblast",
+ 3796: "spw-dialer",
+ 3797: "idps",
+ 3798: "minilock",
+ 3799: "radius-dynauth",
+ 3800: "pwgpsi",
+ 3801: "ibm-mgr",
+ 3802: "vhd",
+ 3803: "soniqsync",
+ 3804: "iqnet-port",
+ 3805: "tcpdataserver",
+ 3806: "wsmlb",
+ 3807: "spugna",
+ 3808: "sun-as-iiops-ca",
+ 3809: "apocd",
+ 3810: "wlanauth",
+ 3811: "amp",
+ 3812: "neto-wol-server",
+ 3813: "rap-ip",
+ 3814: "neto-dcs",
+ 3815: "lansurveyorxml",
+ 3816: "sunlps-http",
+ 3817: "tapeware",
+ 3818: "crinis-hb",
+ 3819: "epl-slp",
+ 3820: "scp",
+ 3821: "pmcp",
+ 3822: "acp-discovery",
+ 3823: "acp-conduit",
+ 3824: "acp-policy",
+ 3825: "ffserver",
+ 3826: "warmux",
+ 3827: "netmpi",
+ 3828: "neteh",
+ 3829: "neteh-ext",
+ 3830: "cernsysmgmtagt",
+ 3831: "dvapps",
+ 3832: "xxnetserver",
+ 3833: "aipn-auth",
+ 3834: "spectardata",
+ 3835: "spectardb",
+ 3836: "markem-dcp",
+ 3837: "mkm-discovery",
+ 3838: "sos",
+ 3839: "amx-rms",
+ 3840: "flirtmitmir",
+ 3841: "shiprush-db-svr",
+ 3842: "nhci",
+ 3843: "quest-agent",
+ 3844: "rnm",
+ 3845: "v-one-spp",
+ 3846: "an-pcp",
+ 3847: "msfw-control",
+ 3848: "item",
+ 3849: "spw-dnspreload",
+ 3850: "qtms-bootstrap",
+ 3851: "spectraport",
+ 3852: "sse-app-config",
+ 3853: "sscan",
+ 3854: "stryker-com",
+ 3855: "opentrac",
+ 3856: "informer",
+ 3857: "trap-port",
+ 3858: "trap-port-mom",
+ 3859: "nav-port",
+ 3860: "sasp",
+ 3861: "winshadow-hd",
+ 3862: "giga-pocket",
+ 3863: "asap-tcp",
+ 3864: "asap-tcp-tls",
+ 3865: "xpl",
+ 3866: "dzdaemon",
+ 3867: "dzoglserver",
+ 3868: "diameter",
+ 3869: "ovsam-mgmt",
+ 3870: "ovsam-d-agent",
+ 3871: "avocent-adsap",
+ 3872: "oem-agent",
+ 3873: "fagordnc",
+ 3874: "sixxsconfig",
+ 3875: "pnbscada",
+ 3876: "dl-agent",
+ 3877: "xmpcr-interface",
+ 3878: "fotogcad",
+ 3879: "appss-lm",
+ 3880: "igrs",
+ 3881: "idac",
+ 3882: "msdts1",
+ 3883: "vrpn",
+ 3884: "softrack-meter",
+ 3885: "topflow-ssl",
+ 3886: "nei-management",
+ 3887: "ciphire-data",
+ 3888: "ciphire-serv",
+ 3889: "dandv-tester",
+ 3890: "ndsconnect",
+ 3891: "rtc-pm-port",
+ 3892: "pcc-image-port",
+ 3893: "cgi-starapi",
+ 3894: "syam-agent",
+ 3895: "syam-smc",
+ 3896: "sdo-tls",
+ 3897: "sdo-ssh",
+ 3898: "senip",
+ 3899: "itv-control",
+ 3900: "udt-os",
+ 3901: "nimsh",
+ 3902: "nimaux",
+ 3903: "charsetmgr",
+ 3904: "omnilink-port",
+ 3905: "mupdate",
+ 3906: "topovista-data",
+ 3907: "imoguia-port",
+ 3908: "hppronetman",
+ 3909: "surfcontrolcpa",
+ 3910: "prnrequest",
+ 3911: "prnstatus",
+ 3912: "gbmt-stars",
+ 3913: "listcrt-port",
+ 3914: "listcrt-port-2",
+ 3915: "agcat",
+ 3916: "wysdmc",
+ 3917: "aftmux",
+ 3918: "pktcablemmcops",
+ 3919: "hyperip",
+ 3920: "exasoftport1",
+ 3921: "herodotus-net",
+ 3922: "sor-update",
+ 3923: "symb-sb-port",
+ 3924: "mpl-gprs-port",
+ 3925: "zmp",
+ 3926: "winport",
+ 3927: "natdataservice",
+ 3928: "netboot-pxe",
+ 3929: "smauth-port",
+ 3930: "syam-webserver",
+ 3931: "msr-plugin-port",
+ 3932: "dyn-site",
+ 3933: "plbserve-port",
+ 3934: "sunfm-port",
+ 3935: "sdp-portmapper",
+ 3936: "mailprox",
+ 3937: "dvbservdsc",
+ 3938: "dbcontrol-agent",
+ 3939: "aamp",
+ 3940: "xecp-node",
+ 3941: "homeportal-web",
+ 3942: "srdp",
+ 3943: "tig",
+ 3944: "sops",
+ 3945: "emcads",
+ 3946: "backupedge",
+ 3947: "ccp",
+ 3948: "apdap",
+ 3949: "drip",
+ 3950: "namemunge",
+ 3951: "pwgippfax",
+ 3952: "i3-sessionmgr",
+ 3953: "xmlink-connect",
+ 3954: "adrep",
+ 3955: "p2pcommunity",
+ 3956: "gvcp",
+ 3957: "mqe-broker",
+ 3958: "mqe-agent",
+ 3959: "treehopper",
+ 3960: "bess",
+ 3961: "proaxess",
+ 3962: "sbi-agent",
+ 3963: "thrp",
+ 3964: "sasggprs",
+ 3965: "ati-ip-to-ncpe",
+ 3966: "bflckmgr",
+ 3967: "ppsms",
+ 3968: "ianywhere-dbns",
+ 3969: "landmarks",
+ 3970: "lanrevagent",
+ 3971: "lanrevserver",
+ 3972: "iconp",
+ 3973: "progistics",
+ 3974: "citysearch",
+ 3975: "airshot",
+ 3976: "opswagent",
+ 3977: "opswmanager",
+ 3978: "secure-cfg-svr",
+ 3979: "smwan",
+ 3980: "acms",
+ 3981: "starfish",
+ 3982: "eis",
+ 3983: "eisp",
+ 3984: "mapper-nodemgr",
+ 3985: "mapper-mapethd",
+ 3986: "mapper-ws-ethd",
+ 3987: "centerline",
+ 3988: "dcs-config",
+ 3989: "bv-queryengine",
+ 3990: "bv-is",
+ 3991: "bv-smcsrv",
+ 3992: "bv-ds",
+ 3993: "bv-agent",
+ 3995: "iss-mgmt-ssl",
+ 3996: "abcsoftware",
+ 3997: "agentsease-db",
+ 3998: "dnx",
+ 3999: "nvcnet",
+ 4000: "terabase",
+ 4001: "newoak",
+ 4002: "pxc-spvr-ft",
+ 4003: "pxc-splr-ft",
+ 4004: "pxc-roid",
+ 4005: "pxc-pin",
+ 4006: "pxc-spvr",
+ 4007: "pxc-splr",
+ 4008: "netcheque",
+ 4009: "chimera-hwm",
+ 4010: "samsung-unidex",
+ 4011: "altserviceboot",
+ 4012: "pda-gate",
+ 4013: "acl-manager",
+ 4014: "taiclock",
+ 4015: "talarian-mcast1",
+ 4016: "talarian-mcast2",
+ 4017: "talarian-mcast3",
+ 4018: "talarian-mcast4",
+ 4019: "talarian-mcast5",
+ 4020: "trap",
+ 4021: "nexus-portal",
+ 4022: "dnox",
+ 4023: "esnm-zoning",
+ 4024: "tnp1-port",
+ 4025: "partimage",
+ 4026: "as-debug",
+ 4027: "bxp",
+ 4028: "dtserver-port",
+ 4029: "ip-qsig",
+ 4030: "jdmn-port",
+ 4031: "suucp",
+ 4032: "vrts-auth-port",
+ 4033: "sanavigator",
+ 4034: "ubxd",
+ 4035: "wap-push-http",
+ 4036: "wap-push-https",
+ 4037: "ravehd",
+ 4038: "fazzt-ptp",
+ 4039: "fazzt-admin",
+ 4040: "yo-main",
+ 4041: "houston",
+ 4042: "ldxp",
+ 4043: "nirp",
+ 4044: "ltp",
+ 4045: "npp",
+ 4046: "acp-proto",
+ 4047: "ctp-state",
+ 4049: "wafs",
+ 4050: "cisco-wafs",
+ 4051: "cppdp",
+ 4052: "interact",
+ 4053: "ccu-comm-1",
+ 4054: "ccu-comm-2",
+ 4055: "ccu-comm-3",
+ 4056: "lms",
+ 4057: "wfm",
+ 4058: "kingfisher",
+ 4059: "dlms-cosem",
+ 4060: "dsmeter-iatc",
+ 4061: "ice-location",
+ 4062: "ice-slocation",
+ 4063: "ice-router",
+ 4064: "ice-srouter",
+ 4065: "avanti-cdp",
+ 4066: "pmas",
+ 4067: "idp",
+ 4068: "ipfltbcst",
+ 4069: "minger",
+ 4070: "tripe",
+ 4071: "aibkup",
+ 4072: "zieto-sock",
+ 4073: "iRAPP",
+ 4074: "cequint-cityid",
+ 4075: "perimlan",
+ 4076: "seraph",
+ 4078: "cssp",
+ 4079: "santools",
+ 4080: "lorica-in",
+ 4081: "lorica-in-sec",
+ 4082: "lorica-out",
+ 4083: "lorica-out-sec",
+ 4085: "ezmessagesrv",
+ 4087: "applusservice",
+ 4088: "npsp",
+ 4089: "opencore",
+ 4090: "omasgport",
+ 4091: "ewinstaller",
+ 4092: "ewdgs",
+ 4093: "pvxpluscs",
+ 4094: "sysrqd",
+ 4095: "xtgui",
+ 4096: "bre",
+ 4097: "patrolview",
+ 4098: "drmsfsd",
+ 4099: "dpcp",
+ 4100: "igo-incognito",
+ 4101: "brlp-0",
+ 4102: "brlp-1",
+ 4103: "brlp-2",
+ 4104: "brlp-3",
+ 4105: "shofar",
+ 4106: "synchronite",
+ 4107: "j-ac",
+ 4108: "accel",
+ 4109: "izm",
+ 4110: "g2tag",
+ 4111: "xgrid",
+ 4112: "apple-vpns-rp",
+ 4113: "aipn-reg",
+ 4114: "jomamqmonitor",
+ 4115: "cds",
+ 4116: "smartcard-tls",
+ 4117: "hillrserv",
+ 4118: "netscript",
+ 4119: "assuria-slm",
+ 4120: "minirem",
+ 4121: "e-builder",
+ 4122: "fprams",
+ 4123: "z-wave",
+ 4124: "tigv2",
+ 4125: "opsview-envoy",
+ 4126: "ddrepl",
+ 4127: "unikeypro",
+ 4128: "nufw",
+ 4129: "nuauth",
+ 4130: "fronet",
+ 4131: "stars",
+ 4132: "nuts-dem",
+ 4133: "nuts-bootp",
+ 4134: "nifty-hmi",
+ 4135: "cl-db-attach",
+ 4136: "cl-db-request",
+ 4137: "cl-db-remote",
+ 4138: "nettest",
+ 4139: "thrtx",
+ 4140: "cedros-fds",
+ 4141: "oirtgsvc",
+ 4142: "oidocsvc",
+ 4143: "oidsr",
+ 4145: "vvr-control",
+ 4146: "tgcconnect",
+ 4147: "vrxpservman",
+ 4148: "hhb-handheld",
+ 4149: "agslb",
+ 4150: "PowerAlert-nsa",
+ 4151: "menandmice-noh",
+ 4152: "idig-mux",
+ 4153: "mbl-battd",
+ 4154: "atlinks",
+ 4155: "bzr",
+ 4156: "stat-results",
+ 4157: "stat-scanner",
+ 4158: "stat-cc",
+ 4159: "nss",
+ 4160: "jini-discovery",
+ 4161: "omscontact",
+ 4162: "omstopology",
+ 4163: "silverpeakpeer",
+ 4164: "silverpeakcomm",
+ 4165: "altcp",
+ 4166: "joost",
+ 4167: "ddgn",
+ 4168: "pslicser",
+ 4169: "iadt",
+ 4170: "d-cinema-csp",
+ 4171: "ml-svnet",
+ 4172: "pcoip",
+ 4174: "smcluster",
+ 4175: "bccp",
+ 4176: "tl-ipcproxy",
+ 4177: "wello",
+ 4178: "storman",
+ 4179: "MaxumSP",
+ 4180: "httpx",
+ 4181: "macbak",
+ 4182: "pcptcpservice",
+ 4183: "cyborgnet",
+ 4184: "universe-suite",
+ 4185: "wcpp",
+ 4186: "boxbackupstore",
+ 4187: "csc-proxy",
+ 4188: "vatata",
+ 4189: "pcep",
+ 4190: "sieve",
+ 4192: "azeti",
+ 4193: "pvxplusio",
+ 4197: "hctl",
+ 4199: "eims-admin",
+ 4300: "corelccam",
+ 4301: "d-data",
+ 4302: "d-data-control",
+ 4303: "srcp",
+ 4304: "owserver",
+ 4305: "batman",
+ 4306: "pinghgl",
+ 4307: "trueconf",
+ 4308: "compx-lockview",
+ 4309: "dserver",
+ 4310: "mirrtex",
+ 4311: "p6ssmc",
+ 4312: "pscl-mgt",
+ 4313: "perrla",
+ 4314: "choiceview-agt",
+ 4316: "choiceview-clt",
+ 4320: "fdt-rcatp",
+ 4321: "rwhois",
+ 4322: "trim-event",
+ 4323: "trim-ice",
+ 4325: "geognosisman",
+ 4326: "geognosis",
+ 4327: "jaxer-web",
+ 4328: "jaxer-manager",
+ 4329: "publiqare-sync",
+ 4330: "dey-sapi",
+ 4331: "ktickets-rest",
+ 4333: "ahsp",
+ 4334: "netconf-ch-ssh",
+ 4335: "netconf-ch-tls",
+ 4336: "restconf-ch-tls",
+ 4340: "gaia",
+ 4341: "lisp-data",
+ 4342: "lisp-cons",
+ 4343: "unicall",
+ 4344: "vinainstall",
+ 4345: "m4-network-as",
+ 4346: "elanlm",
+ 4347: "lansurveyor",
+ 4348: "itose",
+ 4349: "fsportmap",
+ 4350: "net-device",
+ 4351: "plcy-net-svcs",
+ 4352: "pjlink",
+ 4353: "f5-iquery",
+ 4354: "qsnet-trans",
+ 4355: "qsnet-workst",
+ 4356: "qsnet-assist",
+ 4357: "qsnet-cond",
+ 4358: "qsnet-nucl",
+ 4359: "omabcastltkm",
+ 4360: "matrix-vnet",
+ 4368: "wxbrief",
+ 4369: "epmd",
+ 4370: "elpro-tunnel",
+ 4371: "l2c-control",
+ 4372: "l2c-data",
+ 4373: "remctl",
+ 4374: "psi-ptt",
+ 4375: "tolteces",
+ 4376: "bip",
+ 4377: "cp-spxsvr",
+ 4378: "cp-spxdpy",
+ 4379: "ctdb",
+ 4389: "xandros-cms",
+ 4390: "wiegand",
+ 4391: "apwi-imserver",
+ 4392: "apwi-rxserver",
+ 4393: "apwi-rxspooler",
+ 4395: "omnivisionesx",
+ 4396: "fly",
+ 4400: "ds-srv",
+ 4401: "ds-srvr",
+ 4402: "ds-clnt",
+ 4403: "ds-user",
+ 4404: "ds-admin",
+ 4405: "ds-mail",
+ 4406: "ds-slp",
+ 4407: "nacagent",
+ 4408: "slscc",
+ 4409: "netcabinet-com",
+ 4410: "itwo-server",
+ 4411: "found",
+ 4413: "avi-nms",
+ 4414: "updog",
+ 4415: "brcd-vr-req",
+ 4416: "pjj-player",
+ 4417: "workflowdir",
+ 4419: "cbp",
+ 4420: "nvm-express",
+ 4421: "scaleft",
+ 4422: "tsepisp",
+ 4423: "thingkit",
+ 4425: "netrockey6",
+ 4426: "beacon-port-2",
+ 4427: "drizzle",
+ 4428: "omviserver",
+ 4429: "omviagent",
+ 4430: "rsqlserver",
+ 4431: "wspipe",
+ 4432: "l-acoustics",
+ 4433: "vop",
+ 4442: "saris",
+ 4443: "pharos",
+ 4444: "krb524",
+ 4445: "upnotifyp",
+ 4446: "n1-fwp",
+ 4447: "n1-rmgmt",
+ 4448: "asc-slmd",
+ 4449: "privatewire",
+ 4450: "camp",
+ 4451: "ctisystemmsg",
+ 4452: "ctiprogramload",
+ 4453: "nssalertmgr",
+ 4454: "nssagentmgr",
+ 4455: "prchat-user",
+ 4456: "prchat-server",
+ 4457: "prRegister",
+ 4458: "mcp",
+ 4484: "hpssmgmt",
+ 4485: "assyst-dr",
+ 4486: "icms",
+ 4487: "prex-tcp",
+ 4488: "awacs-ice",
+ 4500: "ipsec-nat-t",
+ 4535: "ehs",
+ 4536: "ehs-ssl",
+ 4537: "wssauthsvc",
+ 4538: "swx-gate",
+ 4545: "worldscores",
+ 4546: "sf-lm",
+ 4547: "lanner-lm",
+ 4548: "synchromesh",
+ 4549: "aegate",
+ 4550: "gds-adppiw-db",
+ 4551: "ieee-mih",
+ 4552: "menandmice-mon",
+ 4553: "icshostsvc",
+ 4554: "msfrs",
+ 4555: "rsip",
+ 4556: "dtn-bundle",
+ 4559: "hylafax",
+ 4563: "amahi-anywhere",
+ 4566: "kwtc",
+ 4567: "tram",
+ 4568: "bmc-reporting",
+ 4569: "iax",
+ 4570: "deploymentmap",
+ 4573: "cardifftec-back",
+ 4590: "rid",
+ 4591: "l3t-at-an",
+ 4593: "ipt-anri-anri",
+ 4594: "ias-session",
+ 4595: "ias-paging",
+ 4596: "ias-neighbor",
+ 4597: "a21-an-1xbs",
+ 4598: "a16-an-an",
+ 4599: "a17-an-an",
+ 4600: "piranha1",
+ 4601: "piranha2",
+ 4602: "mtsserver",
+ 4603: "menandmice-upg",
+ 4604: "irp",
+ 4605: "sixchat",
+ 4658: "playsta2-app",
+ 4659: "playsta2-lob",
+ 4660: "smaclmgr",
+ 4661: "kar2ouche",
+ 4662: "oms",
+ 4663: "noteit",
+ 4664: "ems",
+ 4665: "contclientms",
+ 4666: "eportcomm",
+ 4667: "mmacomm",
+ 4668: "mmaeds",
+ 4669: "eportcommdata",
+ 4670: "light",
+ 4671: "acter",
+ 4672: "rfa",
+ 4673: "cxws",
+ 4674: "appiq-mgmt",
+ 4675: "dhct-status",
+ 4676: "dhct-alerts",
+ 4677: "bcs",
+ 4678: "traversal",
+ 4679: "mgesupervision",
+ 4680: "mgemanagement",
+ 4681: "parliant",
+ 4682: "finisar",
+ 4683: "spike",
+ 4684: "rfid-rp1",
+ 4685: "autopac",
+ 4686: "msp-os",
+ 4687: "nst",
+ 4688: "mobile-p2p",
+ 4689: "altovacentral",
+ 4690: "prelude",
+ 4691: "mtn",
+ 4692: "conspiracy",
+ 4700: "netxms-agent",
+ 4701: "netxms-mgmt",
+ 4702: "netxms-sync",
+ 4703: "npqes-test",
+ 4704: "assuria-ins",
+ 4711: "trinity-dist",
+ 4725: "truckstar",
+ 4727: "fcis",
+ 4728: "capmux",
+ 4730: "gearman",
+ 4731: "remcap",
+ 4733: "resorcs",
+ 4737: "ipdr-sp",
+ 4738: "solera-lpn",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 4741: "lumimgrd",
+ 4742: "sicct",
+ 4743: "openhpid",
+ 4744: "ifsp",
+ 4745: "fmp",
+ 4749: "profilemac",
+ 4750: "ssad",
+ 4751: "spocp",
+ 4752: "snap",
+ 4753: "simon",
+ 4756: "RDCenter",
+ 4774: "converge",
+ 4784: "bfd-multi-ctl",
+ 4786: "smart-install",
+ 4787: "sia-ctrl-plane",
+ 4788: "xmcp",
+ 4800: "iims",
+ 4801: "iwec",
+ 4802: "ilss",
+ 4803: "notateit",
+ 4827: "htcp",
+ 4837: "varadero-0",
+ 4838: "varadero-1",
+ 4839: "varadero-2",
+ 4840: "opcua-tcp",
+ 4841: "quosa",
+ 4842: "gw-asv",
+ 4843: "opcua-tls",
+ 4844: "gw-log",
+ 4845: "wcr-remlib",
+ 4846: "contamac-icm",
+ 4847: "wfc",
+ 4848: "appserv-http",
+ 4849: "appserv-https",
+ 4850: "sun-as-nodeagt",
+ 4851: "derby-repli",
+ 4867: "unify-debug",
+ 4868: "phrelay",
+ 4869: "phrelaydbg",
+ 4870: "cc-tracking",
+ 4871: "wired",
+ 4876: "tritium-can",
+ 4877: "lmcs",
+ 4879: "wsdl-event",
+ 4880: "hislip",
+ 4883: "wmlserver",
+ 4884: "hivestor",
+ 4885: "abbs",
+ 4894: "lyskom",
+ 4899: "radmin-port",
+ 4900: "hfcs",
+ 4901: "flr-agent",
+ 4902: "magiccontrol",
+ 4912: "lutap",
+ 4913: "lutcp",
+ 4914: "bones",
+ 4915: "frcs",
+ 4940: "eq-office-4940",
+ 4941: "eq-office-4941",
+ 4942: "eq-office-4942",
+ 4949: "munin",
+ 4950: "sybasesrvmon",
+ 4951: "pwgwims",
+ 4952: "sagxtsds",
+ 4953: "dbsyncarbiter",
+ 4969: "ccss-qmm",
+ 4970: "ccss-qsm",
+ 4971: "burp",
+ 4984: "webyast",
+ 4985: "gerhcs",
+ 4986: "mrip",
+ 4987: "smar-se-port1",
+ 4988: "smar-se-port2",
+ 4989: "parallel",
+ 4990: "busycal",
+ 4991: "vrt",
+ 4999: "hfcs-manager",
+ 5000: "commplex-main",
+ 5001: "commplex-link",
+ 5002: "rfe",
+ 5003: "fmpro-internal",
+ 5004: "avt-profile-1",
+ 5005: "avt-profile-2",
+ 5006: "wsm-server",
+ 5007: "wsm-server-ssl",
+ 5008: "synapsis-edge",
+ 5009: "winfs",
+ 5010: "telelpathstart",
+ 5011: "telelpathattack",
+ 5012: "nsp",
+ 5013: "fmpro-v6",
+ 5015: "fmwp",
+ 5020: "zenginkyo-1",
+ 5021: "zenginkyo-2",
+ 5022: "mice",
+ 5023: "htuilsrv",
+ 5024: "scpi-telnet",
+ 5025: "scpi-raw",
+ 5026: "strexec-d",
+ 5027: "strexec-s",
+ 5028: "qvr",
+ 5029: "infobright",
+ 5030: "surfpass",
+ 5032: "signacert-agent",
+ 5033: "jtnetd-server",
+ 5034: "jtnetd-status",
+ 5042: "asnaacceler8db",
+ 5043: "swxadmin",
+ 5044: "lxi-evntsvc",
+ 5045: "osp",
+ 5048: "texai",
+ 5049: "ivocalize",
+ 5050: "mmcc",
+ 5051: "ita-agent",
+ 5052: "ita-manager",
+ 5053: "rlm",
+ 5054: "rlm-admin",
+ 5055: "unot",
+ 5056: "intecom-ps1",
+ 5057: "intecom-ps2",
+ 5059: "sds",
+ 5060: "sip",
+ 5061: "sips",
+ 5062: "na-localise",
+ 5063: "csrpc",
+ 5064: "ca-1",
+ 5065: "ca-2",
+ 5066: "stanag-5066",
+ 5067: "authentx",
+ 5068: "bitforestsrv",
+ 5069: "i-net-2000-npr",
+ 5070: "vtsas",
+ 5071: "powerschool",
+ 5072: "ayiya",
+ 5073: "tag-pm",
+ 5074: "alesquery",
+ 5075: "pvaccess",
+ 5080: "onscreen",
+ 5081: "sdl-ets",
+ 5082: "qcp",
+ 5083: "qfp",
+ 5084: "llrp",
+ 5085: "encrypted-llrp",
+ 5086: "aprigo-cs",
+ 5087: "biotic",
+ 5093: "sentinel-lm",
+ 5094: "hart-ip",
+ 5099: "sentlm-srv2srv",
+ 5100: "socalia",
+ 5101: "talarian-tcp",
+ 5102: "oms-nonsecure",
+ 5103: "actifio-c2c",
+ 5106: "actifioudsagent",
+ 5107: "actifioreplic",
+ 5111: "taep-as-svc",
+ 5112: "pm-cmdsvr",
+ 5114: "ev-services",
+ 5115: "autobuild",
+ 5117: "gradecam",
+ 5120: "barracuda-bbs",
+ 5133: "nbt-pc",
+ 5134: "ppactivation",
+ 5135: "erp-scale",
+ 5137: "ctsd",
+ 5145: "rmonitor-secure",
+ 5146: "social-alarm",
+ 5150: "atmp",
+ 5151: "esri-sde",
+ 5152: "sde-discovery",
+ 5153: "toruxserver",
+ 5154: "bzflag",
+ 5155: "asctrl-agent",
+ 5156: "rugameonline",
+ 5157: "mediat",
+ 5161: "snmpssh",
+ 5162: "snmpssh-trap",
+ 5163: "sbackup",
+ 5164: "vpa",
+ 5165: "ife-icorp",
+ 5166: "winpcs",
+ 5167: "scte104",
+ 5168: "scte30",
+ 5172: "pcoip-mgmt",
+ 5190: "aol",
+ 5191: "aol-1",
+ 5192: "aol-2",
+ 5193: "aol-3",
+ 5194: "cpscomm",
+ 5195: "ampl-lic",
+ 5196: "ampl-tableproxy",
+ 5197: "tunstall-lwp",
+ 5200: "targus-getdata",
+ 5201: "targus-getdata1",
+ 5202: "targus-getdata2",
+ 5203: "targus-getdata3",
+ 5209: "nomad",
+ 5215: "noteza",
+ 5221: "3exmp",
+ 5222: "xmpp-client",
+ 5223: "hpvirtgrp",
+ 5224: "hpvirtctrl",
+ 5225: "hp-server",
+ 5226: "hp-status",
+ 5227: "perfd",
+ 5228: "hpvroom",
+ 5229: "jaxflow",
+ 5230: "jaxflow-data",
+ 5231: "crusecontrol",
+ 5232: "csedaemon",
+ 5233: "enfs",
+ 5234: "eenet",
+ 5235: "galaxy-network",
+ 5236: "padl2sim",
+ 5237: "mnet-discovery",
+ 5245: "downtools",
+ 5248: "caacws",
+ 5249: "caaclang2",
+ 5250: "soagateway",
+ 5251: "caevms",
+ 5252: "movaz-ssc",
+ 5253: "kpdp",
+ 5254: "logcabin",
+ 5264: "3com-njack-1",
+ 5265: "3com-njack-2",
+ 5269: "xmpp-server",
+ 5270: "cartographerxmp",
+ 5271: "cuelink",
+ 5272: "pk",
+ 5280: "xmpp-bosh",
+ 5281: "undo-lm",
+ 5282: "transmit-port",
+ 5298: "presence",
+ 5299: "nlg-data",
+ 5300: "hacl-hb",
+ 5301: "hacl-gs",
+ 5302: "hacl-cfg",
+ 5303: "hacl-probe",
+ 5304: "hacl-local",
+ 5305: "hacl-test",
+ 5306: "sun-mc-grp",
+ 5307: "sco-aip",
+ 5308: "cfengine",
+ 5309: "jprinter",
+ 5310: "outlaws",
+ 5312: "permabit-cs",
+ 5313: "rrdp",
+ 5314: "opalis-rbt-ipc",
+ 5315: "hacl-poll",
+ 5316: "hpbladems",
+ 5317: "hpdevms",
+ 5318: "pkix-cmc",
+ 5320: "bsfserver-zn",
+ 5321: "bsfsvr-zn-ssl",
+ 5343: "kfserver",
+ 5344: "xkotodrcp",
+ 5349: "stuns",
+ 5352: "dns-llq",
+ 5353: "mdns",
+ 5354: "mdnsresponder",
+ 5355: "llmnr",
+ 5356: "ms-smlbiz",
+ 5357: "wsdapi",
+ 5358: "wsdapi-s",
+ 5359: "ms-alerter",
+ 5360: "ms-sideshow",
+ 5361: "ms-s-sideshow",
+ 5362: "serverwsd2",
+ 5363: "net-projection",
+ 5397: "stresstester",
+ 5398: "elektron-admin",
+ 5399: "securitychase",
+ 5400: "excerpt",
+ 5401: "excerpts",
+ 5402: "mftp",
+ 5403: "hpoms-ci-lstn",
+ 5404: "hpoms-dps-lstn",
+ 5405: "netsupport",
+ 5406: "systemics-sox",
+ 5407: "foresyte-clear",
+ 5408: "foresyte-sec",
+ 5409: "salient-dtasrv",
+ 5410: "salient-usrmgr",
+ 5411: "actnet",
+ 5412: "continuus",
+ 5413: "wwiotalk",
+ 5414: "statusd",
+ 5415: "ns-server",
+ 5416: "sns-gateway",
+ 5417: "sns-agent",
+ 5418: "mcntp",
+ 5419: "dj-ice",
+ 5420: "cylink-c",
+ 5421: "netsupport2",
+ 5422: "salient-mux",
+ 5423: "virtualuser",
+ 5424: "beyond-remote",
+ 5425: "br-channel",
+ 5426: "devbasic",
+ 5427: "sco-peer-tta",
+ 5428: "telaconsole",
+ 5429: "base",
+ 5430: "radec-corp",
+ 5431: "park-agent",
+ 5432: "postgresql",
+ 5433: "pyrrho",
+ 5434: "sgi-arrayd",
+ 5435: "sceanics",
+ 5443: "spss",
+ 5445: "smbdirect",
+ 5450: "tiepie",
+ 5453: "surebox",
+ 5454: "apc-5454",
+ 5455: "apc-5455",
+ 5456: "apc-5456",
+ 5461: "silkmeter",
+ 5462: "ttl-publisher",
+ 5463: "ttlpriceproxy",
+ 5464: "quailnet",
+ 5465: "netops-broker",
+ 5470: "apsolab-col",
+ 5471: "apsolab-cols",
+ 5472: "apsolab-tag",
+ 5473: "apsolab-tags",
+ 5475: "apsolab-data",
+ 5500: "fcp-addr-srvr1",
+ 5501: "fcp-addr-srvr2",
+ 5502: "fcp-srvr-inst1",
+ 5503: "fcp-srvr-inst2",
+ 5504: "fcp-cics-gw1",
+ 5505: "checkoutdb",
+ 5506: "amc",
+ 5507: "psl-management",
+ 5550: "cbus",
+ 5553: "sgi-eventmond",
+ 5554: "sgi-esphttp",
+ 5555: "personal-agent",
+ 5556: "freeciv",
+ 5557: "farenet",
+ 5565: "hpe-dp-bura",
+ 5566: "westec-connect",
+ 5567: "dof-dps-mc-sec",
+ 5568: "sdt",
+ 5569: "rdmnet-ctrl",
+ 5573: "sdmmp",
+ 5574: "lsi-bobcat",
+ 5575: "ora-oap",
+ 5579: "fdtracks",
+ 5580: "tmosms0",
+ 5581: "tmosms1",
+ 5582: "fac-restore",
+ 5583: "tmo-icon-sync",
+ 5584: "bis-web",
+ 5585: "bis-sync",
+ 5586: "att-mt-sms",
+ 5597: "ininmessaging",
+ 5598: "mctfeed",
+ 5599: "esinstall",
+ 5600: "esmmanager",
+ 5601: "esmagent",
+ 5602: "a1-msc",
+ 5603: "a1-bs",
+ 5604: "a3-sdunode",
+ 5605: "a4-sdunode",
+ 5618: "efr",
+ 5627: "ninaf",
+ 5628: "htrust",
+ 5629: "symantec-sfdb",
+ 5630: "precise-comm",
+ 5631: "pcanywheredata",
+ 5632: "pcanywherestat",
+ 5633: "beorl",
+ 5634: "xprtld",
+ 5635: "sfmsso",
+ 5636: "sfm-db-server",
+ 5637: "cssc",
+ 5638: "flcrs",
+ 5639: "ics",
+ 5646: "vfmobile",
+ 5666: "nrpe",
+ 5670: "filemq",
+ 5671: "amqps",
+ 5672: "amqp",
+ 5673: "jms",
+ 5674: "hyperscsi-port",
+ 5675: "v5ua",
+ 5676: "raadmin",
+ 5677: "questdb2-lnchr",
+ 5678: "rrac",
+ 5679: "dccm",
+ 5680: "auriga-router",
+ 5681: "ncxcp",
+ 5688: "ggz",
+ 5689: "qmvideo",
+ 5693: "rbsystem",
+ 5696: "kmip",
+ 5700: "supportassist",
+ 5705: "storageos",
+ 5713: "proshareaudio",
+ 5714: "prosharevideo",
+ 5715: "prosharedata",
+ 5716: "prosharerequest",
+ 5717: "prosharenotify",
+ 5718: "dpm",
+ 5719: "dpm-agent",
+ 5720: "ms-licensing",
+ 5721: "dtpt",
+ 5722: "msdfsr",
+ 5723: "omhs",
+ 5724: "omsdk",
+ 5725: "ms-ilm",
+ 5726: "ms-ilm-sts",
+ 5727: "asgenf",
+ 5728: "io-dist-data",
+ 5729: "openmail",
+ 5730: "unieng",
+ 5741: "ida-discover1",
+ 5742: "ida-discover2",
+ 5743: "watchdoc-pod",
+ 5744: "watchdoc",
+ 5745: "fcopy-server",
+ 5746: "fcopys-server",
+ 5747: "tunatic",
+ 5748: "tunalyzer",
+ 5750: "rscd",
+ 5755: "openmailg",
+ 5757: "x500ms",
+ 5766: "openmailns",
+ 5767: "s-openmail",
+ 5768: "openmailpxy",
+ 5769: "spramsca",
+ 5770: "spramsd",
+ 5771: "netagent",
+ 5777: "dali-port",
+ 5780: "vts-rpc",
+ 5781: "3par-evts",
+ 5782: "3par-mgmt",
+ 5783: "3par-mgmt-ssl",
+ 5785: "3par-rcopy",
+ 5793: "xtreamx",
+ 5813: "icmpd",
+ 5814: "spt-automation",
+ 5841: "shiprush-d-ch",
+ 5842: "reversion",
+ 5859: "wherehoo",
+ 5863: "ppsuitemsg",
+ 5868: "diameters",
+ 5883: "jute",
+ 5900: "rfb",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 5963: "indy",
+ 5968: "mppolicy-v5",
+ 5969: "mppolicy-mgr",
+ 5984: "couchdb",
+ 5985: "wsman",
+ 5986: "wsmans",
+ 5987: "wbem-rmi",
+ 5988: "wbem-http",
+ 5989: "wbem-https",
+ 5990: "wbem-exp-https",
+ 5991: "nuxsl",
+ 5992: "consul-insight",
+ 5993: "cim-rs",
+ 5999: "cvsup",
+ 6064: "ndl-ahp-svc",
+ 6065: "winpharaoh",
+ 6066: "ewctsp",
+ 6068: "gsmp-ancp",
+ 6069: "trip",
+ 6070: "messageasap",
+ 6071: "ssdtp",
+ 6072: "diagnose-proc",
+ 6073: "directplay8",
+ 6074: "max",
+ 6075: "dpm-acm",
+ 6076: "msft-dpm-cert",
+ 6077: "iconstructsrv",
+ 6084: "reload-config",
+ 6085: "konspire2b",
+ 6086: "pdtp",
+ 6087: "ldss",
+ 6088: "doglms",
+ 6099: "raxa-mgmt",
+ 6100: "synchronet-db",
+ 6101: "synchronet-rtc",
+ 6102: "synchronet-upd",
+ 6103: "rets",
+ 6104: "dbdb",
+ 6105: "primaserver",
+ 6106: "mpsserver",
+ 6107: "etc-control",
+ 6108: "sercomm-scadmin",
+ 6109: "globecast-id",
+ 6110: "softcm",
+ 6111: "spc",
+ 6112: "dtspcd",
+ 6113: "dayliteserver",
+ 6114: "wrspice",
+ 6115: "xic",
+ 6116: "xtlserv",
+ 6117: "daylitetouch",
+ 6121: "spdy",
+ 6122: "bex-webadmin",
+ 6123: "backup-express",
+ 6124: "pnbs",
+ 6130: "damewaremobgtwy",
+ 6133: "nbt-wol",
+ 6140: "pulsonixnls",
+ 6141: "meta-corp",
+ 6142: "aspentec-lm",
+ 6143: "watershed-lm",
+ 6144: "statsci1-lm",
+ 6145: "statsci2-lm",
+ 6146: "lonewolf-lm",
+ 6147: "montage-lm",
+ 6148: "ricardo-lm",
+ 6149: "tal-pod",
+ 6159: "efb-aci",
+ 6160: "ecmp",
+ 6161: "patrol-ism",
+ 6162: "patrol-coll",
+ 6163: "pscribe",
+ 6200: "lm-x",
+ 6209: "qmtps",
+ 6222: "radmind",
+ 6241: "jeol-nsdtp-1",
+ 6242: "jeol-nsdtp-2",
+ 6243: "jeol-nsdtp-3",
+ 6244: "jeol-nsdtp-4",
+ 6251: "tl1-raw-ssl",
+ 6252: "tl1-ssh",
+ 6253: "crip",
+ 6267: "gld",
+ 6268: "grid",
+ 6269: "grid-alt",
+ 6300: "bmc-grx",
+ 6301: "bmc-ctd-ldap",
+ 6306: "ufmp",
+ 6315: "scup",
+ 6316: "abb-escp",
+ 6317: "nav-data-cmd",
+ 6320: "repsvc",
+ 6321: "emp-server1",
+ 6322: "emp-server2",
+ 6324: "hrd-ncs",
+ 6325: "dt-mgmtsvc",
+ 6326: "dt-vra",
+ 6343: "sflow",
+ 6344: "streletz",
+ 6346: "gnutella-svc",
+ 6347: "gnutella-rtr",
+ 6350: "adap",
+ 6355: "pmcs",
+ 6360: "metaedit-mu",
+ 6370: "metaedit-se",
+ 6379: "redis",
+ 6382: "metatude-mds",
+ 6389: "clariion-evr01",
+ 6390: "metaedit-ws",
+ 6417: "faxcomservice",
+ 6418: "syserverremote",
+ 6419: "svdrp",
+ 6420: "nim-vdrshell",
+ 6421: "nim-wan",
+ 6432: "pgbouncer",
+ 6442: "tarp",
+ 6443: "sun-sr-https",
+ 6444: "sge-qmaster",
+ 6445: "sge-execd",
+ 6446: "mysql-proxy",
+ 6455: "skip-cert-recv",
+ 6456: "skip-cert-send",
+ 6464: "ieee11073-20701",
+ 6471: "lvision-lm",
+ 6480: "sun-sr-http",
+ 6481: "servicetags",
+ 6482: "ldoms-mgmt",
+ 6483: "SunVTS-RMI",
+ 6484: "sun-sr-jms",
+ 6485: "sun-sr-iiop",
+ 6486: "sun-sr-iiops",
+ 6487: "sun-sr-iiop-aut",
+ 6488: "sun-sr-jmx",
+ 6489: "sun-sr-admin",
+ 6500: "boks",
+ 6501: "boks-servc",
+ 6502: "boks-servm",
+ 6503: "boks-clntd",
+ 6505: "badm-priv",
+ 6506: "badm-pub",
+ 6507: "bdir-priv",
+ 6508: "bdir-pub",
+ 6509: "mgcs-mfp-port",
+ 6510: "mcer-port",
+ 6513: "netconf-tls",
+ 6514: "syslog-tls",
+ 6515: "elipse-rec",
+ 6543: "lds-distrib",
+ 6544: "lds-dump",
+ 6547: "apc-6547",
+ 6548: "apc-6548",
+ 6549: "apc-6549",
+ 6550: "fg-sysupdate",
+ 6551: "sum",
+ 6558: "xdsxdm",
+ 6566: "sane-port",
+ 6568: "canit-store",
+ 6579: "affiliate",
+ 6580: "parsec-master",
+ 6581: "parsec-peer",
+ 6582: "parsec-game",
+ 6583: "joaJewelSuite",
+ 6600: "mshvlm",
+ 6601: "mstmg-sstp",
+ 6602: "wsscomfrmwk",
+ 6619: "odette-ftps",
+ 6620: "kftp-data",
+ 6621: "kftp",
+ 6622: "mcftp",
+ 6623: "ktelnet",
+ 6624: "datascaler-db",
+ 6625: "datascaler-ctl",
+ 6626: "wago-service",
+ 6627: "nexgen",
+ 6628: "afesc-mc",
+ 6629: "nexgen-aux",
+ 6632: "mxodbc-connect",
+ 6640: "ovsdb",
+ 6653: "openflow",
+ 6655: "pcs-sf-ui-man",
+ 6656: "emgmsg",
+ 6670: "vocaltec-gold",
+ 6671: "p4p-portal",
+ 6672: "vision-server",
+ 6673: "vision-elmd",
+ 6678: "vfbp",
+ 6679: "osaut",
+ 6687: "clever-ctrace",
+ 6688: "clever-tcpip",
+ 6689: "tsa",
+ 6690: "cleverdetect",
+ 6697: "ircs-u",
+ 6701: "kti-icad-srvr",
+ 6702: "e-design-net",
+ 6703: "e-design-web",
+ 6714: "ibprotocol",
+ 6715: "fibotrader-com",
+ 6716: "princity-agent",
+ 6767: "bmc-perf-agent",
+ 6768: "bmc-perf-mgrd",
+ 6769: "adi-gxp-srvprt",
+ 6770: "plysrv-http",
+ 6771: "plysrv-https",
+ 6777: "ntz-tracker",
+ 6778: "ntz-p2p-storage",
+ 6785: "dgpf-exchg",
+ 6786: "smc-jmx",
+ 6787: "smc-admin",
+ 6788: "smc-http",
+ 6789: "radg",
+ 6790: "hnmp",
+ 6791: "hnm",
+ 6801: "acnet",
+ 6817: "pentbox-sim",
+ 6831: "ambit-lm",
+ 6841: "netmo-default",
+ 6842: "netmo-http",
+ 6850: "iccrushmore",
+ 6868: "acctopus-cc",
+ 6888: "muse",
+ 6900: "rtimeviewer",
+ 6901: "jetstream",
+ 6935: "ethoscan",
+ 6936: "xsmsvc",
+ 6946: "bioserver",
+ 6951: "otlp",
+ 6961: "jmact3",
+ 6962: "jmevt2",
+ 6963: "swismgr1",
+ 6964: "swismgr2",
+ 6965: "swistrap",
+ 6966: "swispol",
+ 6969: "acmsoda",
+ 6970: "conductor",
+ 6997: "MobilitySrv",
+ 6998: "iatp-highpri",
+ 6999: "iatp-normalpri",
+ 7000: "afs3-fileserver",
+ 7001: "afs3-callback",
+ 7002: "afs3-prserver",
+ 7003: "afs3-vlserver",
+ 7004: "afs3-kaserver",
+ 7005: "afs3-volser",
+ 7006: "afs3-errors",
+ 7007: "afs3-bos",
+ 7008: "afs3-update",
+ 7009: "afs3-rmtsys",
+ 7010: "ups-onlinet",
+ 7011: "talon-disc",
+ 7012: "talon-engine",
+ 7013: "microtalon-dis",
+ 7014: "microtalon-com",
+ 7015: "talon-webserver",
+ 7016: "spg",
+ 7017: "grasp",
+ 7018: "fisa-svc",
+ 7019: "doceri-ctl",
+ 7020: "dpserve",
+ 7021: "dpserveadmin",
+ 7022: "ctdp",
+ 7023: "ct2nmcs",
+ 7024: "vmsvc",
+ 7025: "vmsvc-2",
+ 7030: "op-probe",
+ 7031: "iposplanet",
+ 7070: "arcp",
+ 7071: "iwg1",
+ 7073: "martalk",
+ 7080: "empowerid",
+ 7099: "lazy-ptop",
+ 7100: "font-service",
+ 7101: "elcn",
+ 7117: "rothaga",
+ 7121: "virprot-lm",
+ 7128: "scenidm",
+ 7129: "scenccs",
+ 7161: "cabsm-comm",
+ 7162: "caistoragemgr",
+ 7163: "cacsambroker",
+ 7164: "fsr",
+ 7165: "doc-server",
+ 7166: "aruba-server",
+ 7167: "casrmagent",
+ 7168: "cnckadserver",
+ 7169: "ccag-pib",
+ 7170: "nsrp",
+ 7171: "drm-production",
+ 7172: "metalbend",
+ 7173: "zsecure",
+ 7174: "clutild",
+ 7200: "fodms",
+ 7201: "dlip",
+ 7202: "pon-ictp",
+ 7215: "PS-Server",
+ 7216: "PS-Capture-Pro",
+ 7227: "ramp",
+ 7228: "citrixupp",
+ 7229: "citrixuppg",
+ 7236: "display",
+ 7237: "pads",
+ 7244: "frc-hicp",
+ 7262: "cnap",
+ 7272: "watchme-7272",
+ 7273: "oma-rlp",
+ 7274: "oma-rlp-s",
+ 7275: "oma-ulp",
+ 7276: "oma-ilp",
+ 7277: "oma-ilp-s",
+ 7278: "oma-dcdocbs",
+ 7279: "ctxlic",
+ 7280: "itactionserver1",
+ 7281: "itactionserver2",
+ 7282: "mzca-action",
+ 7283: "genstat",
+ 7365: "lcm-server",
+ 7391: "mindfilesys",
+ 7392: "mrssrendezvous",
+ 7393: "nfoldman",
+ 7394: "fse",
+ 7395: "winqedit",
+ 7397: "hexarc",
+ 7400: "rtps-discovery",
+ 7401: "rtps-dd-ut",
+ 7402: "rtps-dd-mt",
+ 7410: "ionixnetmon",
+ 7411: "daqstream",
+ 7421: "mtportmon",
+ 7426: "pmdmgr",
+ 7427: "oveadmgr",
+ 7428: "ovladmgr",
+ 7429: "opi-sock",
+ 7430: "xmpv7",
+ 7431: "pmd",
+ 7437: "faximum",
+ 7443: "oracleas-https",
+ 7471: "sttunnel",
+ 7473: "rise",
+ 7474: "neo4j",
+ 7478: "openit",
+ 7491: "telops-lmd",
+ 7500: "silhouette",
+ 7501: "ovbus",
+ 7508: "adcp",
+ 7509: "acplt",
+ 7510: "ovhpas",
+ 7511: "pafec-lm",
+ 7542: "saratoga",
+ 7543: "atul",
+ 7544: "nta-ds",
+ 7545: "nta-us",
+ 7546: "cfs",
+ 7547: "cwmp",
+ 7548: "tidp",
+ 7549: "nls-tl",
+ 7551: "controlone-con",
+ 7560: "sncp",
+ 7563: "cfw",
+ 7566: "vsi-omega",
+ 7569: "dell-eql-asm",
+ 7570: "aries-kfinder",
+ 7574: "coherence",
+ 7588: "sun-lm",
+ 7606: "mipi-debug",
+ 7624: "indi",
+ 7626: "simco",
+ 7627: "soap-http",
+ 7628: "zen-pawn",
+ 7629: "xdas",
+ 7630: "hawk",
+ 7631: "tesla-sys-msg",
+ 7633: "pmdfmgt",
+ 7648: "cuseeme",
+ 7672: "imqstomp",
+ 7673: "imqstomps",
+ 7674: "imqtunnels",
+ 7675: "imqtunnel",
+ 7676: "imqbrokerd",
+ 7677: "sun-user-https",
+ 7680: "pando-pub",
+ 7683: "dmt",
+ 7687: "bolt",
+ 7689: "collaber",
+ 7697: "klio",
+ 7700: "em7-secom",
+ 7707: "sync-em7",
+ 7708: "scinet",
+ 7720: "medimageportal",
+ 7724: "nsdeepfreezectl",
+ 7725: "nitrogen",
+ 7726: "freezexservice",
+ 7727: "trident-data",
+ 7728: "osvr",
+ 7734: "smip",
+ 7738: "aiagent",
+ 7741: "scriptview",
+ 7742: "msss",
+ 7743: "sstp-1",
+ 7744: "raqmon-pdu",
+ 7747: "prgp",
+ 7775: "inetfs",
+ 7777: "cbt",
+ 7778: "interwise",
+ 7779: "vstat",
+ 7781: "accu-lmgr",
+ 7786: "minivend",
+ 7787: "popup-reminders",
+ 7789: "office-tools",
+ 7794: "q3ade",
+ 7797: "pnet-conn",
+ 7798: "pnet-enc",
+ 7799: "altbsdp",
+ 7800: "asr",
+ 7801: "ssp-client",
+ 7810: "rbt-wanopt",
+ 7845: "apc-7845",
+ 7846: "apc-7846",
+ 7847: "csoauth",
+ 7869: "mobileanalyzer",
+ 7870: "rbt-smc",
+ 7871: "mdm",
+ 7878: "owms",
+ 7880: "pss",
+ 7887: "ubroker",
+ 7900: "mevent",
+ 7901: "tnos-sp",
+ 7902: "tnos-dp",
+ 7903: "tnos-dps",
+ 7913: "qo-secure",
+ 7932: "t2-drm",
+ 7933: "t2-brm",
+ 7962: "generalsync",
+ 7967: "supercell",
+ 7979: "micromuse-ncps",
+ 7980: "quest-vista",
+ 7981: "sossd-collect",
+ 7982: "sossd-agent",
+ 7997: "pushns",
+ 7999: "irdmi2",
+ 8000: "irdmi",
+ 8001: "vcom-tunnel",
+ 8002: "teradataordbms",
+ 8003: "mcreport",
+ 8005: "mxi",
+ 8006: "wpl-analytics",
+ 8007: "warppipe",
+ 8008: "http-alt",
+ 8019: "qbdb",
+ 8020: "intu-ec-svcdisc",
+ 8021: "intu-ec-client",
+ 8022: "oa-system",
+ 8025: "ca-audit-da",
+ 8026: "ca-audit-ds",
+ 8032: "pro-ed",
+ 8033: "mindprint",
+ 8034: "vantronix-mgmt",
+ 8040: "ampify",
+ 8041: "enguity-xccetp",
+ 8042: "fs-agent",
+ 8043: "fs-server",
+ 8044: "fs-mgmt",
+ 8051: "rocrail",
+ 8052: "senomix01",
+ 8053: "senomix02",
+ 8054: "senomix03",
+ 8055: "senomix04",
+ 8056: "senomix05",
+ 8057: "senomix06",
+ 8058: "senomix07",
+ 8059: "senomix08",
+ 8066: "toad-bi-appsrvr",
+ 8067: "infi-async",
+ 8070: "ucs-isc",
+ 8074: "gadugadu",
+ 8077: "mles",
+ 8080: "http-alt",
+ 8081: "sunproxyadmin",
+ 8082: "us-cli",
+ 8083: "us-srv",
+ 8086: "d-s-n",
+ 8087: "simplifymedia",
+ 8088: "radan-http",
+ 8090: "opsmessaging",
+ 8091: "jamlink",
+ 8097: "sac",
+ 8100: "xprint-server",
+ 8101: "ldoms-migr",
+ 8102: "kz-migr",
+ 8115: "mtl8000-matrix",
+ 8116: "cp-cluster",
+ 8117: "purityrpc",
+ 8118: "privoxy",
+ 8121: "apollo-data",
+ 8122: "apollo-admin",
+ 8128: "paycash-online",
+ 8129: "paycash-wbp",
+ 8130: "indigo-vrmi",
+ 8131: "indigo-vbcp",
+ 8132: "dbabble",
+ 8140: "puppet",
+ 8148: "isdd",
+ 8153: "quantastor",
+ 8160: "patrol",
+ 8161: "patrol-snmp",
+ 8162: "lpar2rrd",
+ 8181: "intermapper",
+ 8182: "vmware-fdm",
+ 8183: "proremote",
+ 8184: "itach",
+ 8190: "gcp-rphy",
+ 8191: "limnerpressure",
+ 8192: "spytechphone",
+ 8194: "blp1",
+ 8195: "blp2",
+ 8199: "vvr-data",
+ 8200: "trivnet1",
+ 8201: "trivnet2",
+ 8204: "lm-perfworks",
+ 8205: "lm-instmgr",
+ 8206: "lm-dta",
+ 8207: "lm-sserver",
+ 8208: "lm-webwatcher",
+ 8230: "rexecj",
+ 8243: "synapse-nhttps",
+ 8270: "robot-remote",
+ 8276: "pando-sec",
+ 8280: "synapse-nhttp",
+ 8282: "libelle",
+ 8292: "blp3",
+ 8293: "hiperscan-id",
+ 8294: "blp4",
+ 8300: "tmi",
+ 8301: "amberon",
+ 8313: "hub-open-net",
+ 8320: "tnp-discover",
+ 8321: "tnp",
+ 8322: "garmin-marine",
+ 8351: "server-find",
+ 8376: "cruise-enum",
+ 8377: "cruise-swroute",
+ 8378: "cruise-config",
+ 8379: "cruise-diags",
+ 8380: "cruise-update",
+ 8383: "m2mservices",
+ 8400: "cvd",
+ 8401: "sabarsd",
+ 8402: "abarsd",
+ 8403: "admind",
+ 8404: "svcloud",
+ 8405: "svbackup",
+ 8415: "dlpx-sp",
+ 8416: "espeech",
+ 8417: "espeech-rtp",
+ 8423: "aritts",
+ 8442: "cybro-a-bus",
+ 8443: "pcsync-https",
+ 8444: "pcsync-http",
+ 8445: "copy",
+ 8450: "npmp",
+ 8457: "nexentamv",
+ 8470: "cisco-avp",
+ 8471: "pim-port",
+ 8472: "otv",
+ 8473: "vp2p",
+ 8474: "noteshare",
+ 8500: "fmtp",
+ 8501: "cmtp-mgt",
+ 8502: "ftnmtp",
+ 8554: "rtsp-alt",
+ 8555: "d-fence",
+ 8567: "dof-tunnel",
+ 8600: "asterix",
+ 8610: "canon-mfnp",
+ 8611: "canon-bjnp1",
+ 8612: "canon-bjnp2",
+ 8613: "canon-bjnp3",
+ 8614: "canon-bjnp4",
+ 8615: "imink",
+ 8665: "monetra",
+ 8666: "monetra-admin",
+ 8675: "msi-cps-rm",
+ 8686: "sun-as-jmxrmi",
+ 8688: "openremote-ctrl",
+ 8699: "vnyx",
+ 8711: "nvc",
+ 8733: "ibus",
+ 8750: "dey-keyneg",
+ 8763: "mc-appserver",
+ 8764: "openqueue",
+ 8765: "ultraseek-http",
+ 8766: "amcs",
+ 8770: "dpap",
+ 8778: "uec",
+ 8786: "msgclnt",
+ 8787: "msgsrvr",
+ 8793: "acd-pm",
+ 8800: "sunwebadmin",
+ 8804: "truecm",
+ 8873: "dxspider",
+ 8880: "cddbp-alt",
+ 8881: "galaxy4d",
+ 8883: "secure-mqtt",
+ 8888: "ddi-tcp-1",
+ 8889: "ddi-tcp-2",
+ 8890: "ddi-tcp-3",
+ 8891: "ddi-tcp-4",
+ 8892: "ddi-tcp-5",
+ 8893: "ddi-tcp-6",
+ 8894: "ddi-tcp-7",
+ 8899: "ospf-lite",
+ 8900: "jmb-cds1",
+ 8901: "jmb-cds2",
+ 8910: "manyone-http",
+ 8911: "manyone-xml",
+ 8912: "wcbackup",
+ 8913: "dragonfly",
+ 8937: "twds",
+ 8953: "ub-dns-control",
+ 8954: "cumulus-admin",
+ 8980: "nod-provider",
+ 8989: "sunwebadmins",
+ 8990: "http-wmap",
+ 8991: "https-wmap",
+ 8997: "oracle-ms-ens",
+ 8998: "canto-roboflow",
+ 8999: "bctp",
+ 9000: "cslistener",
+ 9001: "etlservicemgr",
+ 9002: "dynamid",
+ 9005: "golem",
+ 9008: "ogs-server",
+ 9009: "pichat",
+ 9010: "sdr",
+ 9020: "tambora",
+ 9021: "panagolin-ident",
+ 9022: "paragent",
+ 9023: "swa-1",
+ 9024: "swa-2",
+ 9025: "swa-3",
+ 9026: "swa-4",
+ 9050: "versiera",
+ 9051: "fio-cmgmt",
+ 9060: "CardWeb-IO",
+ 9080: "glrpc",
+ 9083: "emc-pp-mgmtsvc",
+ 9084: "aurora",
+ 9085: "ibm-rsyscon",
+ 9086: "net2display",
+ 9087: "classic",
+ 9088: "sqlexec",
+ 9089: "sqlexec-ssl",
+ 9090: "websm",
+ 9091: "xmltec-xmlmail",
+ 9092: "XmlIpcRegSvc",
+ 9093: "copycat",
+ 9100: "hp-pdl-datastr",
+ 9101: "bacula-dir",
+ 9102: "bacula-fd",
+ 9103: "bacula-sd",
+ 9104: "peerwire",
+ 9105: "xadmin",
+ 9106: "astergate",
+ 9107: "astergatefax",
+ 9119: "mxit",
+ 9122: "grcmp",
+ 9123: "grcp",
+ 9131: "dddp",
+ 9160: "apani1",
+ 9161: "apani2",
+ 9162: "apani3",
+ 9163: "apani4",
+ 9164: "apani5",
+ 9191: "sun-as-jpda",
+ 9200: "wap-wsp",
+ 9201: "wap-wsp-wtp",
+ 9202: "wap-wsp-s",
+ 9203: "wap-wsp-wtp-s",
+ 9204: "wap-vcard",
+ 9205: "wap-vcal",
+ 9206: "wap-vcard-s",
+ 9207: "wap-vcal-s",
+ 9208: "rjcdb-vcards",
+ 9209: "almobile-system",
+ 9210: "oma-mlp",
+ 9211: "oma-mlp-s",
+ 9212: "serverviewdbms",
+ 9213: "serverstart",
+ 9214: "ipdcesgbs",
+ 9215: "insis",
+ 9216: "acme",
+ 9217: "fsc-port",
+ 9222: "teamcoherence",
+ 9255: "mon",
+ 9278: "pegasus",
+ 9279: "pegasus-ctl",
+ 9280: "pgps",
+ 9281: "swtp-port1",
+ 9282: "swtp-port2",
+ 9283: "callwaveiam",
+ 9284: "visd",
+ 9285: "n2h2server",
+ 9287: "cumulus",
+ 9292: "armtechdaemon",
+ 9293: "storview",
+ 9294: "armcenterhttp",
+ 9295: "armcenterhttps",
+ 9300: "vrace",
+ 9306: "sphinxql",
+ 9312: "sphinxapi",
+ 9318: "secure-ts",
+ 9321: "guibase",
+ 9343: "mpidcmgr",
+ 9344: "mphlpdmc",
+ 9345: "rancher",
+ 9346: "ctechlicensing",
+ 9374: "fjdmimgr",
+ 9380: "boxp",
+ 9387: "d2dconfig",
+ 9388: "d2ddatatrans",
+ 9389: "adws",
+ 9390: "otp",
+ 9396: "fjinvmgr",
+ 9397: "mpidcagt",
+ 9400: "sec-t4net-srv",
+ 9401: "sec-t4net-clt",
+ 9402: "sec-pc2fax-srv",
+ 9418: "git",
+ 9443: "tungsten-https",
+ 9444: "wso2esb-console",
+ 9445: "mindarray-ca",
+ 9450: "sntlkeyssrvr",
+ 9500: "ismserver",
+ 9535: "mngsuite",
+ 9536: "laes-bf",
+ 9555: "trispen-sra",
+ 9592: "ldgateway",
+ 9593: "cba8",
+ 9594: "msgsys",
+ 9595: "pds",
+ 9596: "mercury-disc",
+ 9597: "pd-admin",
+ 9598: "vscp",
+ 9599: "robix",
+ 9600: "micromuse-ncpw",
+ 9612: "streamcomm-ds",
+ 9614: "iadt-tls",
+ 9616: "erunbook-agent",
+ 9617: "erunbook-server",
+ 9618: "condor",
+ 9628: "odbcpathway",
+ 9629: "uniport",
+ 9630: "peoctlr",
+ 9631: "peocoll",
+ 9640: "pqsflows",
+ 9666: "zoomcp",
+ 9667: "xmms2",
+ 9668: "tec5-sdctp",
+ 9694: "client-wakeup",
+ 9695: "ccnx",
+ 9700: "board-roar",
+ 9747: "l5nas-parchan",
+ 9750: "board-voip",
+ 9753: "rasadv",
+ 9762: "tungsten-http",
+ 9800: "davsrc",
+ 9801: "sstp-2",
+ 9802: "davsrcs",
+ 9875: "sapv1",
+ 9876: "sd",
+ 9888: "cyborg-systems",
+ 9889: "gt-proxy",
+ 9898: "monkeycom",
+ 9900: "iua",
+ 9909: "domaintime",
+ 9911: "sype-transport",
+ 9925: "xybrid-cloud",
+ 9950: "apc-9950",
+ 9951: "apc-9951",
+ 9952: "apc-9952",
+ 9953: "acis",
+ 9954: "hinp",
+ 9955: "alljoyn-stm",
+ 9966: "odnsp",
+ 9978: "xybrid-rt",
+ 9979: "visweather",
+ 9981: "pumpkindb",
+ 9987: "dsm-scm-target",
+ 9988: "nsesrvr",
+ 9990: "osm-appsrvr",
+ 9991: "osm-oev",
+ 9992: "palace-1",
+ 9993: "palace-2",
+ 9994: "palace-3",
+ 9995: "palace-4",
+ 9996: "palace-5",
+ 9997: "palace-6",
+ 9998: "distinct32",
+ 9999: "distinct",
+ 10000: "ndmp",
+ 10001: "scp-config",
+ 10002: "documentum",
+ 10003: "documentum-s",
+ 10004: "emcrmirccd",
+ 10005: "emcrmird",
+ 10006: "netapp-sync",
+ 10007: "mvs-capacity",
+ 10008: "octopus",
+ 10009: "swdtp-sv",
+ 10010: "rxapi",
+ 10020: "abb-hw",
+ 10050: "zabbix-agent",
+ 10051: "zabbix-trapper",
+ 10055: "qptlmd",
+ 10080: "amanda",
+ 10081: "famdc",
+ 10100: "itap-ddtp",
+ 10101: "ezmeeting-2",
+ 10102: "ezproxy-2",
+ 10103: "ezrelay",
+ 10104: "swdtp",
+ 10107: "bctp-server",
+ 10110: "nmea-0183",
+ 10113: "netiq-endpoint",
+ 10114: "netiq-qcheck",
+ 10115: "netiq-endpt",
+ 10116: "netiq-voipa",
+ 10117: "iqrm",
+ 10125: "cimple",
+ 10128: "bmc-perf-sd",
+ 10129: "bmc-gms",
+ 10160: "qb-db-server",
+ 10161: "snmptls",
+ 10162: "snmptls-trap",
+ 10200: "trisoap",
+ 10201: "rsms",
+ 10252: "apollo-relay",
+ 10260: "axis-wimp-port",
+ 10261: "tile-ml",
+ 10288: "blocks",
+ 10321: "cosir",
+ 10540: "MOS-lower",
+ 10541: "MOS-upper",
+ 10542: "MOS-aux",
+ 10543: "MOS-soap",
+ 10544: "MOS-soap-opt",
+ 10548: "serverdocs",
+ 10631: "printopia",
+ 10800: "gap",
+ 10805: "lpdg",
+ 10809: "nbd",
+ 10860: "helix",
+ 10880: "bveapi",
+ 10933: "octopustentacle",
+ 10990: "rmiaux",
+ 11000: "irisa",
+ 11001: "metasys",
+ 11095: "weave",
+ 11103: "origo-sync",
+ 11104: "netapp-icmgmt",
+ 11105: "netapp-icdata",
+ 11106: "sgi-lk",
+ 11109: "sgi-dmfmgr",
+ 11110: "sgi-soap",
+ 11111: "vce",
+ 11112: "dicom",
+ 11161: "suncacao-snmp",
+ 11162: "suncacao-jmxmp",
+ 11163: "suncacao-rmi",
+ 11164: "suncacao-csa",
+ 11165: "suncacao-websvc",
+ 11172: "oemcacao-jmxmp",
+ 11173: "t5-straton",
+ 11174: "oemcacao-rmi",
+ 11175: "oemcacao-websvc",
+ 11201: "smsqp",
+ 11202: "dcsl-backup",
+ 11208: "wifree",
+ 11211: "memcache",
+ 11319: "imip",
+ 11320: "imip-channels",
+ 11321: "arena-server",
+ 11367: "atm-uhas",
+ 11371: "hkp",
+ 11489: "asgcypresstcps",
+ 11600: "tempest-port",
+ 11623: "emc-xsw-dconfig",
+ 11720: "h323callsigalt",
+ 11723: "emc-xsw-dcache",
+ 11751: "intrepid-ssl",
+ 11796: "lanschool",
+ 11876: "xoraya",
+ 11967: "sysinfo-sp",
+ 12000: "entextxid",
+ 12001: "entextnetwk",
+ 12002: "entexthigh",
+ 12003: "entextmed",
+ 12004: "entextlow",
+ 12005: "dbisamserver1",
+ 12006: "dbisamserver2",
+ 12007: "accuracer",
+ 12008: "accuracer-dbms",
+ 12010: "edbsrvr",
+ 12012: "vipera",
+ 12013: "vipera-ssl",
+ 12109: "rets-ssl",
+ 12121: "nupaper-ss",
+ 12168: "cawas",
+ 12172: "hivep",
+ 12300: "linogridengine",
+ 12302: "rads",
+ 12321: "warehouse-sss",
+ 12322: "warehouse",
+ 12345: "italk",
+ 12753: "tsaf",
+ 12865: "netperf",
+ 13160: "i-zipqd",
+ 13216: "bcslogc",
+ 13217: "rs-pias",
+ 13218: "emc-vcas-tcp",
+ 13223: "powwow-client",
+ 13224: "powwow-server",
+ 13400: "doip-data",
+ 13720: "bprd",
+ 13721: "bpdbm",
+ 13722: "bpjava-msvc",
+ 13724: "vnetd",
+ 13782: "bpcd",
+ 13783: "vopied",
+ 13785: "nbdb",
+ 13786: "nomdb",
+ 13818: "dsmcc-config",
+ 13819: "dsmcc-session",
+ 13820: "dsmcc-passthru",
+ 13821: "dsmcc-download",
+ 13822: "dsmcc-ccp",
+ 13823: "bmdss",
+ 13894: "ucontrol",
+ 13929: "dta-systems",
+ 13930: "medevolve",
+ 14000: "scotty-ft",
+ 14001: "sua",
+ 14033: "sage-best-com1",
+ 14034: "sage-best-com2",
+ 14141: "vcs-app",
+ 14142: "icpp",
+ 14143: "icpps",
+ 14145: "gcm-app",
+ 14149: "vrts-tdd",
+ 14150: "vcscmd",
+ 14154: "vad",
+ 14250: "cps",
+ 14414: "ca-web-update",
+ 14500: "xpra",
+ 14936: "hde-lcesrvr-1",
+ 14937: "hde-lcesrvr-2",
+ 15000: "hydap",
+ 15002: "onep-tls",
+ 15345: "xpilot",
+ 15363: "3link",
+ 15555: "cisco-snat",
+ 15660: "bex-xr",
+ 15740: "ptp",
+ 15999: "programmar",
+ 16000: "fmsas",
+ 16001: "fmsascon",
+ 16002: "gsms",
+ 16020: "jwpc",
+ 16021: "jwpc-bin",
+ 16161: "sun-sea-port",
+ 16162: "solaris-audit",
+ 16309: "etb4j",
+ 16310: "pduncs",
+ 16311: "pdefmns",
+ 16360: "netserialext1",
+ 16361: "netserialext2",
+ 16367: "netserialext3",
+ 16368: "netserialext4",
+ 16384: "connected",
+ 16385: "rdgs",
+ 16619: "xoms",
+ 16665: "axon-tunnel",
+ 16789: "cadsisvr",
+ 16900: "newbay-snc-mc",
+ 16950: "sgcip",
+ 16991: "intel-rci-mp",
+ 16992: "amt-soap-http",
+ 16993: "amt-soap-https",
+ 16994: "amt-redir-tcp",
+ 16995: "amt-redir-tls",
+ 17007: "isode-dua",
+ 17184: "vestasdlp",
+ 17185: "soundsvirtual",
+ 17219: "chipper",
+ 17220: "avtp",
+ 17221: "avdecc",
+ 17223: "isa100-gci",
+ 17225: "trdp-md",
+ 17234: "integrius-stp",
+ 17235: "ssh-mgmt",
+ 17500: "db-lsp",
+ 17555: "ailith",
+ 17729: "ea",
+ 17754: "zep",
+ 17755: "zigbee-ip",
+ 17756: "zigbee-ips",
+ 17777: "sw-orion",
+ 18000: "biimenu",
+ 18104: "radpdf",
+ 18136: "racf",
+ 18181: "opsec-cvp",
+ 18182: "opsec-ufp",
+ 18183: "opsec-sam",
+ 18184: "opsec-lea",
+ 18185: "opsec-omi",
+ 18186: "ohsc",
+ 18187: "opsec-ela",
+ 18241: "checkpoint-rtm",
+ 18242: "iclid",
+ 18243: "clusterxl",
+ 18262: "gv-pf",
+ 18463: "ac-cluster",
+ 18634: "rds-ib",
+ 18635: "rds-ip",
+ 18668: "vdmmesh",
+ 18769: "ique",
+ 18881: "infotos",
+ 18888: "apc-necmp",
+ 19000: "igrid",
+ 19007: "scintilla",
+ 19020: "j-link",
+ 19191: "opsec-uaa",
+ 19194: "ua-secureagent",
+ 19220: "cora",
+ 19283: "keysrvr",
+ 19315: "keyshadow",
+ 19398: "mtrgtrans",
+ 19410: "hp-sco",
+ 19411: "hp-sca",
+ 19412: "hp-sessmon",
+ 19539: "fxuptp",
+ 19540: "sxuptp",
+ 19541: "jcp",
+ 19998: "iec-104-sec",
+ 19999: "dnp-sec",
+ 20000: "dnp",
+ 20001: "microsan",
+ 20002: "commtact-http",
+ 20003: "commtact-https",
+ 20005: "openwebnet",
+ 20013: "ss-idi",
+ 20014: "opendeploy",
+ 20034: "nburn-id",
+ 20046: "tmophl7mts",
+ 20048: "mountd",
+ 20049: "nfsrdma",
+ 20057: "avesterra",
+ 20167: "tolfab",
+ 20202: "ipdtp-port",
+ 20222: "ipulse-ics",
+ 20480: "emwavemsg",
+ 20670: "track",
+ 20999: "athand-mmp",
+ 21000: "irtrans",
+ 21010: "notezilla-lan",
+ 21221: "aigairserver",
+ 21553: "rdm-tfs",
+ 21554: "dfserver",
+ 21590: "vofr-gateway",
+ 21800: "tvpm",
+ 21845: "webphone",
+ 21846: "netspeak-is",
+ 21847: "netspeak-cs",
+ 21848: "netspeak-acd",
+ 21849: "netspeak-cps",
+ 22000: "snapenetio",
+ 22001: "optocontrol",
+ 22002: "optohost002",
+ 22003: "optohost003",
+ 22004: "optohost004",
+ 22005: "optohost004",
+ 22125: "dcap",
+ 22128: "gsidcap",
+ 22222: "easyengine",
+ 22273: "wnn6",
+ 22305: "cis",
+ 22335: "shrewd-control",
+ 22343: "cis-secure",
+ 22347: "wibukey",
+ 22350: "codemeter",
+ 22351: "codemeter-cmwan",
+ 22537: "caldsoft-backup",
+ 22555: "vocaltec-wconf",
+ 22763: "talikaserver",
+ 22800: "aws-brf",
+ 22951: "brf-gw",
+ 23000: "inovaport1",
+ 23001: "inovaport2",
+ 23002: "inovaport3",
+ 23003: "inovaport4",
+ 23004: "inovaport5",
+ 23005: "inovaport6",
+ 23053: "gntp",
+ 23294: "5afe-dir",
+ 23333: "elxmgmt",
+ 23400: "novar-dbase",
+ 23401: "novar-alarm",
+ 23402: "novar-global",
+ 23456: "aequus",
+ 23457: "aequus-alt",
+ 23546: "areaguard-neo",
+ 24000: "med-ltp",
+ 24001: "med-fsp-rx",
+ 24002: "med-fsp-tx",
+ 24003: "med-supp",
+ 24004: "med-ovw",
+ 24005: "med-ci",
+ 24006: "med-net-svc",
+ 24242: "filesphere",
+ 24249: "vista-4gl",
+ 24321: "ild",
+ 24386: "intel-rci",
+ 24465: "tonidods",
+ 24554: "binkp",
+ 24577: "bilobit",
+ 24666: "sdtvwcam",
+ 24676: "canditv",
+ 24677: "flashfiler",
+ 24678: "proactivate",
+ 24680: "tcc-http",
+ 24754: "cslg",
+ 24922: "find",
+ 25000: "icl-twobase1",
+ 25001: "icl-twobase2",
+ 25002: "icl-twobase3",
+ 25003: "icl-twobase4",
+ 25004: "icl-twobase5",
+ 25005: "icl-twobase6",
+ 25006: "icl-twobase7",
+ 25007: "icl-twobase8",
+ 25008: "icl-twobase9",
+ 25009: "icl-twobase10",
+ 25576: "sauterdongle",
+ 25604: "idtp",
+ 25793: "vocaltec-hos",
+ 25900: "tasp-net",
+ 25901: "niobserver",
+ 25902: "nilinkanalyst",
+ 25903: "niprobe",
+ 26000: "quake",
+ 26133: "scscp",
+ 26208: "wnn6-ds",
+ 26257: "cockroach",
+ 26260: "ezproxy",
+ 26261: "ezmeeting",
+ 26262: "k3software-svr",
+ 26263: "k3software-cli",
+ 26486: "exoline-tcp",
+ 26487: "exoconfig",
+ 26489: "exonet",
+ 27345: "imagepump",
+ 27442: "jesmsjc",
+ 27504: "kopek-httphead",
+ 27782: "ars-vista",
+ 27876: "astrolink",
+ 27999: "tw-auth-key",
+ 28000: "nxlmd",
+ 28001: "pqsp",
+ 28200: "voxelstorm",
+ 28240: "siemensgsm",
+ 28589: "bosswave",
+ 29167: "otmp",
+ 29999: "bingbang",
+ 30000: "ndmps",
+ 30001: "pago-services1",
+ 30002: "pago-services2",
+ 30003: "amicon-fpsu-ra",
+ 30100: "rwp",
+ 30260: "kingdomsonline",
+ 30400: "gs-realtime",
+ 30999: "ovobs",
+ 31016: "ka-sddp",
+ 31020: "autotrac-acp",
+ 31400: "pace-licensed",
+ 31416: "xqosd",
+ 31457: "tetrinet",
+ 31620: "lm-mon",
+ 31685: "dsx-monitor",
+ 31765: "gamesmith-port",
+ 31948: "iceedcp-tx",
+ 31949: "iceedcp-rx",
+ 32034: "iracinghelper",
+ 32249: "t1distproc60",
+ 32400: "plex",
+ 32483: "apm-link",
+ 32635: "sec-ntb-clnt",
+ 32636: "DMExpress",
+ 32767: "filenet-powsrm",
+ 32768: "filenet-tms",
+ 32769: "filenet-rpc",
+ 32770: "filenet-nch",
+ 32771: "filenet-rmi",
+ 32772: "filenet-pa",
+ 32773: "filenet-cm",
+ 32774: "filenet-re",
+ 32775: "filenet-pch",
+ 32776: "filenet-peior",
+ 32777: "filenet-obrok",
+ 32801: "mlsn",
+ 32811: "retp",
+ 32896: "idmgratm",
+ 33060: "mysqlx",
+ 33123: "aurora-balaena",
+ 33331: "diamondport",
+ 33333: "dgi-serv",
+ 33334: "speedtrace",
+ 33434: "traceroute",
+ 33656: "snip-slave",
+ 34249: "turbonote-2",
+ 34378: "p-net-local",
+ 34379: "p-net-remote",
+ 34567: "dhanalakshmi",
+ 34962: "profinet-rt",
+ 34963: "profinet-rtm",
+ 34964: "profinet-cm",
+ 34980: "ethercat",
+ 35000: "heathview",
+ 35001: "rt-viewer",
+ 35002: "rt-sound",
+ 35003: "rt-devicemapper",
+ 35004: "rt-classmanager",
+ 35005: "rt-labtracker",
+ 35006: "rt-helper",
+ 35100: "axio-disc",
+ 35354: "kitim",
+ 35355: "altova-lm",
+ 35356: "guttersnex",
+ 35357: "openstack-id",
+ 36001: "allpeers",
+ 36524: "febooti-aw",
+ 36602: "observium-agent",
+ 36700: "mapx",
+ 36865: "kastenxpipe",
+ 37475: "neckar",
+ 37483: "gdrive-sync",
+ 37601: "eftp",
+ 37654: "unisys-eportal",
+ 38000: "ivs-database",
+ 38001: "ivs-insertion",
+ 38002: "cresco-control",
+ 38201: "galaxy7-data",
+ 38202: "fairview",
+ 38203: "agpolicy",
+ 38800: "sruth",
+ 38865: "secrmmsafecopya",
+ 39681: "turbonote-1",
+ 40000: "safetynetp",
+ 40404: "sptx",
+ 40841: "cscp",
+ 40842: "csccredir",
+ 40843: "csccfirewall",
+ 41111: "fs-qos",
+ 41121: "tentacle",
+ 41230: "z-wave-s",
+ 41794: "crestron-cip",
+ 41795: "crestron-ctp",
+ 41796: "crestron-cips",
+ 41797: "crestron-ctps",
+ 42508: "candp",
+ 42509: "candrp",
+ 42510: "caerpc",
+ 43000: "recvr-rc",
+ 43188: "reachout",
+ 43189: "ndm-agent-port",
+ 43190: "ip-provision",
+ 43191: "noit-transport",
+ 43210: "shaperai",
+ 43439: "eq3-update",
+ 43440: "ew-mgmt",
+ 43441: "ciscocsdb",
+ 44123: "z-wave-tunnel",
+ 44321: "pmcd",
+ 44322: "pmcdproxy",
+ 44323: "pmwebapi",
+ 44444: "cognex-dataman",
+ 44553: "rbr-debug",
+ 44818: "EtherNet-IP-2",
+ 44900: "m3da",
+ 45000: "asmp",
+ 45001: "asmps",
+ 45002: "rs-status",
+ 45045: "synctest",
+ 45054: "invision-ag",
+ 45514: "cloudcheck",
+ 45678: "eba",
+ 45824: "dai-shell",
+ 45825: "qdb2service",
+ 45966: "ssr-servermgr",
+ 46336: "inedo",
+ 46998: "spremotetablet",
+ 46999: "mediabox",
+ 47000: "mbus",
+ 47001: "winrm",
+ 47557: "dbbrowse",
+ 47624: "directplaysrvr",
+ 47806: "ap",
+ 47808: "bacnet",
+ 48000: "nimcontroller",
+ 48001: "nimspooler",
+ 48002: "nimhub",
+ 48003: "nimgtw",
+ 48004: "nimbusdb",
+ 48005: "nimbusdbctrl",
+ 48049: "3gpp-cbsp",
+ 48050: "weandsf",
+ 48128: "isnetserv",
+ 48129: "blp5",
+ 48556: "com-bardac-dw",
+ 48619: "iqobject",
+ 48653: "robotraconteur",
+ 49000: "matahari",
+ 49001: "nusrp",
+}
+var udpPortNames = map[UDPPort]string{
+ 1: "tcpmux",
+ 2: "compressnet",
+ 3: "compressnet",
+ 5: "rje",
+ 7: "echo",
+ 9: "discard",
+ 11: "systat",
+ 13: "daytime",
+ 17: "qotd",
+ 18: "msp",
+ 19: "chargen",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 23: "telnet",
+ 25: "smtp",
+ 27: "nsw-fe",
+ 29: "msg-icp",
+ 31: "msg-auth",
+ 33: "dsp",
+ 37: "time",
+ 38: "rap",
+ 39: "rlp",
+ 41: "graphics",
+ 42: "name",
+ 43: "nicname",
+ 44: "mpm-flags",
+ 45: "mpm",
+ 46: "mpm-snd",
+ 48: "auditd",
+ 49: "tacacs",
+ 50: "re-mail-ck",
+ 52: "xns-time",
+ 53: "domain",
+ 54: "xns-ch",
+ 55: "isi-gl",
+ 56: "xns-auth",
+ 58: "xns-mail",
+ 62: "acas",
+ 63: "whoispp",
+ 64: "covia",
+ 65: "tacacs-ds",
+ 66: "sql-net",
+ 67: "bootps",
+ 68: "bootpc",
+ 69: "tftp",
+ 70: "gopher",
+ 71: "netrjs-1",
+ 72: "netrjs-2",
+ 73: "netrjs-3",
+ 74: "netrjs-4",
+ 76: "deos",
+ 78: "vettcp",
+ 79: "finger",
+ 80: "http",
+ 82: "xfer",
+ 83: "mit-ml-dev",
+ 84: "ctf",
+ 85: "mit-ml-dev",
+ 86: "mfcobol",
+ 88: "kerberos",
+ 89: "su-mit-tg",
+ 90: "dnsix",
+ 91: "mit-dov",
+ 92: "npp",
+ 93: "dcp",
+ 94: "objcall",
+ 95: "supdup",
+ 96: "dixie",
+ 97: "swift-rvf",
+ 98: "tacnews",
+ 99: "metagram",
+ 101: "hostname",
+ 102: "iso-tsap",
+ 103: "gppitnp",
+ 104: "acr-nema",
+ 105: "cso",
+ 106: "3com-tsmux",
+ 107: "rtelnet",
+ 108: "snagas",
+ 109: "pop2",
+ 110: "pop3",
+ 111: "sunrpc",
+ 112: "mcidas",
+ 113: "auth",
+ 115: "sftp",
+ 116: "ansanotify",
+ 117: "uucp-path",
+ 118: "sqlserv",
+ 119: "nntp",
+ 120: "cfdptkt",
+ 121: "erpc",
+ 122: "smakynet",
+ 123: "ntp",
+ 124: "ansatrader",
+ 125: "locus-map",
+ 126: "nxedit",
+ 127: "locus-con",
+ 128: "gss-xlicen",
+ 129: "pwdgen",
+ 130: "cisco-fna",
+ 131: "cisco-tna",
+ 132: "cisco-sys",
+ 133: "statsrv",
+ 134: "ingres-net",
+ 135: "epmap",
+ 136: "profile",
+ 137: "netbios-ns",
+ 138: "netbios-dgm",
+ 139: "netbios-ssn",
+ 140: "emfis-data",
+ 141: "emfis-cntl",
+ 142: "bl-idm",
+ 143: "imap",
+ 144: "uma",
+ 145: "uaac",
+ 146: "iso-tp0",
+ 147: "iso-ip",
+ 148: "jargon",
+ 149: "aed-512",
+ 150: "sql-net",
+ 151: "hems",
+ 152: "bftp",
+ 153: "sgmp",
+ 154: "netsc-prod",
+ 155: "netsc-dev",
+ 156: "sqlsrv",
+ 157: "knet-cmp",
+ 158: "pcmail-srv",
+ 159: "nss-routing",
+ 160: "sgmp-traps",
+ 161: "snmp",
+ 162: "snmptrap",
+ 163: "cmip-man",
+ 164: "cmip-agent",
+ 165: "xns-courier",
+ 166: "s-net",
+ 167: "namp",
+ 168: "rsvd",
+ 169: "send",
+ 170: "print-srv",
+ 171: "multiplex",
+ 172: "cl-1",
+ 173: "xyplex-mux",
+ 174: "mailq",
+ 175: "vmnet",
+ 176: "genrad-mux",
+ 177: "xdmcp",
+ 178: "nextstep",
+ 179: "bgp",
+ 180: "ris",
+ 181: "unify",
+ 182: "audit",
+ 183: "ocbinder",
+ 184: "ocserver",
+ 185: "remote-kis",
+ 186: "kis",
+ 187: "aci",
+ 188: "mumps",
+ 189: "qft",
+ 190: "gacp",
+ 191: "prospero",
+ 192: "osu-nms",
+ 193: "srmp",
+ 194: "irc",
+ 195: "dn6-nlm-aud",
+ 196: "dn6-smm-red",
+ 197: "dls",
+ 198: "dls-mon",
+ 199: "smux",
+ 200: "src",
+ 201: "at-rtmp",
+ 202: "at-nbp",
+ 203: "at-3",
+ 204: "at-echo",
+ 205: "at-5",
+ 206: "at-zis",
+ 207: "at-7",
+ 208: "at-8",
+ 209: "qmtp",
+ 210: "z39-50",
+ 211: "914c-g",
+ 212: "anet",
+ 213: "ipx",
+ 214: "vmpwscs",
+ 215: "softpc",
+ 216: "CAIlic",
+ 217: "dbase",
+ 218: "mpp",
+ 219: "uarps",
+ 220: "imap3",
+ 221: "fln-spx",
+ 222: "rsh-spx",
+ 223: "cdc",
+ 224: "masqdialer",
+ 242: "direct",
+ 243: "sur-meas",
+ 244: "inbusiness",
+ 245: "link",
+ 246: "dsp3270",
+ 247: "subntbcst-tftp",
+ 248: "bhfhs",
+ 256: "rap",
+ 257: "set",
+ 259: "esro-gen",
+ 260: "openport",
+ 261: "nsiiops",
+ 262: "arcisdms",
+ 263: "hdap",
+ 264: "bgmp",
+ 265: "x-bone-ctl",
+ 266: "sst",
+ 267: "td-service",
+ 268: "td-replica",
+ 269: "manet",
+ 270: "gist",
+ 280: "http-mgmt",
+ 281: "personal-link",
+ 282: "cableport-ax",
+ 283: "rescap",
+ 284: "corerjd",
+ 286: "fxp",
+ 287: "k-block",
+ 308: "novastorbakcup",
+ 309: "entrusttime",
+ 310: "bhmds",
+ 311: "asip-webadmin",
+ 312: "vslmp",
+ 313: "magenta-logic",
+ 314: "opalis-robot",
+ 315: "dpsi",
+ 316: "decauth",
+ 317: "zannet",
+ 318: "pkix-timestamp",
+ 319: "ptp-event",
+ 320: "ptp-general",
+ 321: "pip",
+ 322: "rtsps",
+ 333: "texar",
+ 344: "pdap",
+ 345: "pawserv",
+ 346: "zserv",
+ 347: "fatserv",
+ 348: "csi-sgwp",
+ 349: "mftp",
+ 350: "matip-type-a",
+ 351: "matip-type-b",
+ 352: "dtag-ste-sb",
+ 353: "ndsauth",
+ 354: "bh611",
+ 355: "datex-asn",
+ 356: "cloanto-net-1",
+ 357: "bhevent",
+ 358: "shrinkwrap",
+ 359: "nsrmp",
+ 360: "scoi2odialog",
+ 361: "semantix",
+ 362: "srssend",
+ 363: "rsvp-tunnel",
+ 364: "aurora-cmgr",
+ 365: "dtk",
+ 366: "odmr",
+ 367: "mortgageware",
+ 368: "qbikgdp",
+ 369: "rpc2portmap",
+ 370: "codaauth2",
+ 371: "clearcase",
+ 372: "ulistproc",
+ 373: "legent-1",
+ 374: "legent-2",
+ 375: "hassle",
+ 376: "nip",
+ 377: "tnETOS",
+ 378: "dsETOS",
+ 379: "is99c",
+ 380: "is99s",
+ 381: "hp-collector",
+ 382: "hp-managed-node",
+ 383: "hp-alarm-mgr",
+ 384: "arns",
+ 385: "ibm-app",
+ 386: "asa",
+ 387: "aurp",
+ 388: "unidata-ldm",
+ 389: "ldap",
+ 390: "uis",
+ 391: "synotics-relay",
+ 392: "synotics-broker",
+ 393: "meta5",
+ 394: "embl-ndt",
+ 395: "netcp",
+ 396: "netware-ip",
+ 397: "mptn",
+ 398: "kryptolan",
+ 399: "iso-tsap-c2",
+ 400: "osb-sd",
+ 401: "ups",
+ 402: "genie",
+ 403: "decap",
+ 404: "nced",
+ 405: "ncld",
+ 406: "imsp",
+ 407: "timbuktu",
+ 408: "prm-sm",
+ 409: "prm-nm",
+ 410: "decladebug",
+ 411: "rmt",
+ 412: "synoptics-trap",
+ 413: "smsp",
+ 414: "infoseek",
+ 415: "bnet",
+ 416: "silverplatter",
+ 417: "onmux",
+ 418: "hyper-g",
+ 419: "ariel1",
+ 420: "smpte",
+ 421: "ariel2",
+ 422: "ariel3",
+ 423: "opc-job-start",
+ 424: "opc-job-track",
+ 425: "icad-el",
+ 426: "smartsdp",
+ 427: "svrloc",
+ 428: "ocs-cmu",
+ 429: "ocs-amu",
+ 430: "utmpsd",
+ 431: "utmpcd",
+ 432: "iasd",
+ 433: "nnsp",
+ 434: "mobileip-agent",
+ 435: "mobilip-mn",
+ 436: "dna-cml",
+ 437: "comscm",
+ 438: "dsfgw",
+ 439: "dasp",
+ 440: "sgcp",
+ 441: "decvms-sysmgt",
+ 442: "cvc-hostd",
+ 443: "https",
+ 444: "snpp",
+ 445: "microsoft-ds",
+ 446: "ddm-rdb",
+ 447: "ddm-dfm",
+ 448: "ddm-ssl",
+ 449: "as-servermap",
+ 450: "tserver",
+ 451: "sfs-smp-net",
+ 452: "sfs-config",
+ 453: "creativeserver",
+ 454: "contentserver",
+ 455: "creativepartnr",
+ 456: "macon-udp",
+ 457: "scohelp",
+ 458: "appleqtc",
+ 459: "ampr-rcmd",
+ 460: "skronk",
+ 461: "datasurfsrv",
+ 462: "datasurfsrvsec",
+ 463: "alpes",
+ 464: "kpasswd",
+ 465: "igmpv3lite",
+ 466: "digital-vrc",
+ 467: "mylex-mapd",
+ 468: "photuris",
+ 469: "rcp",
+ 470: "scx-proxy",
+ 471: "mondex",
+ 472: "ljk-login",
+ 473: "hybrid-pop",
+ 474: "tn-tl-w2",
+ 475: "tcpnethaspsrv",
+ 476: "tn-tl-fd1",
+ 477: "ss7ns",
+ 478: "spsc",
+ 479: "iafserver",
+ 480: "iafdbase",
+ 481: "ph",
+ 482: "bgs-nsi",
+ 483: "ulpnet",
+ 484: "integra-sme",
+ 485: "powerburst",
+ 486: "avian",
+ 487: "saft",
+ 488: "gss-http",
+ 489: "nest-protocol",
+ 490: "micom-pfs",
+ 491: "go-login",
+ 492: "ticf-1",
+ 493: "ticf-2",
+ 494: "pov-ray",
+ 495: "intecourier",
+ 496: "pim-rp-disc",
+ 497: "retrospect",
+ 498: "siam",
+ 499: "iso-ill",
+ 500: "isakmp",
+ 501: "stmf",
+ 502: "mbap",
+ 503: "intrinsa",
+ 504: "citadel",
+ 505: "mailbox-lm",
+ 506: "ohimsrv",
+ 507: "crs",
+ 508: "xvttp",
+ 509: "snare",
+ 510: "fcp",
+ 511: "passgo",
+ 512: "comsat",
+ 513: "who",
+ 514: "syslog",
+ 515: "printer",
+ 516: "videotex",
+ 517: "talk",
+ 518: "ntalk",
+ 519: "utime",
+ 520: "router",
+ 521: "ripng",
+ 522: "ulp",
+ 523: "ibm-db2",
+ 524: "ncp",
+ 525: "timed",
+ 526: "tempo",
+ 527: "stx",
+ 528: "custix",
+ 529: "irc-serv",
+ 530: "courier",
+ 531: "conference",
+ 532: "netnews",
+ 533: "netwall",
+ 534: "windream",
+ 535: "iiop",
+ 536: "opalis-rdv",
+ 537: "nmsp",
+ 538: "gdomap",
+ 539: "apertus-ldp",
+ 540: "uucp",
+ 541: "uucp-rlogin",
+ 542: "commerce",
+ 543: "klogin",
+ 544: "kshell",
+ 545: "appleqtcsrvr",
+ 546: "dhcpv6-client",
+ 547: "dhcpv6-server",
+ 548: "afpovertcp",
+ 549: "idfp",
+ 550: "new-rwho",
+ 551: "cybercash",
+ 552: "devshr-nts",
+ 553: "pirp",
+ 554: "rtsp",
+ 555: "dsf",
+ 556: "remotefs",
+ 557: "openvms-sysipc",
+ 558: "sdnskmp",
+ 559: "teedtap",
+ 560: "rmonitor",
+ 561: "monitor",
+ 562: "chshell",
+ 563: "nntps",
+ 564: "9pfs",
+ 565: "whoami",
+ 566: "streettalk",
+ 567: "banyan-rpc",
+ 568: "ms-shuttle",
+ 569: "ms-rome",
+ 570: "meter",
+ 571: "meter",
+ 572: "sonar",
+ 573: "banyan-vip",
+ 574: "ftp-agent",
+ 575: "vemmi",
+ 576: "ipcd",
+ 577: "vnas",
+ 578: "ipdd",
+ 579: "decbsrv",
+ 580: "sntp-heartbeat",
+ 581: "bdp",
+ 582: "scc-security",
+ 583: "philips-vc",
+ 584: "keyserver",
+ 586: "password-chg",
+ 587: "submission",
+ 588: "cal",
+ 589: "eyelink",
+ 590: "tns-cml",
+ 591: "http-alt",
+ 592: "eudora-set",
+ 593: "http-rpc-epmap",
+ 594: "tpip",
+ 595: "cab-protocol",
+ 596: "smsd",
+ 597: "ptcnameservice",
+ 598: "sco-websrvrmg3",
+ 599: "acp",
+ 600: "ipcserver",
+ 601: "syslog-conn",
+ 602: "xmlrpc-beep",
+ 603: "idxp",
+ 604: "tunnel",
+ 605: "soap-beep",
+ 606: "urm",
+ 607: "nqs",
+ 608: "sift-uft",
+ 609: "npmp-trap",
+ 610: "npmp-local",
+ 611: "npmp-gui",
+ 612: "hmmp-ind",
+ 613: "hmmp-op",
+ 614: "sshell",
+ 615: "sco-inetmgr",
+ 616: "sco-sysmgr",
+ 617: "sco-dtmgr",
+ 618: "dei-icda",
+ 619: "compaq-evm",
+ 620: "sco-websrvrmgr",
+ 621: "escp-ip",
+ 622: "collaborator",
+ 623: "asf-rmcp",
+ 624: "cryptoadmin",
+ 625: "dec-dlm",
+ 626: "asia",
+ 627: "passgo-tivoli",
+ 628: "qmqp",
+ 629: "3com-amp3",
+ 630: "rda",
+ 631: "ipp",
+ 632: "bmpp",
+ 633: "servstat",
+ 634: "ginad",
+ 635: "rlzdbase",
+ 636: "ldaps",
+ 637: "lanserver",
+ 638: "mcns-sec",
+ 639: "msdp",
+ 640: "entrust-sps",
+ 641: "repcmd",
+ 642: "esro-emsdp",
+ 643: "sanity",
+ 644: "dwr",
+ 645: "pssc",
+ 646: "ldp",
+ 647: "dhcp-failover",
+ 648: "rrp",
+ 649: "cadview-3d",
+ 650: "obex",
+ 651: "ieee-mms",
+ 652: "hello-port",
+ 653: "repscmd",
+ 654: "aodv",
+ 655: "tinc",
+ 656: "spmp",
+ 657: "rmc",
+ 658: "tenfold",
+ 660: "mac-srvr-admin",
+ 661: "hap",
+ 662: "pftp",
+ 663: "purenoise",
+ 664: "asf-secure-rmcp",
+ 665: "sun-dr",
+ 666: "mdqs",
+ 667: "disclose",
+ 668: "mecomm",
+ 669: "meregister",
+ 670: "vacdsm-sws",
+ 671: "vacdsm-app",
+ 672: "vpps-qua",
+ 673: "cimplex",
+ 674: "acap",
+ 675: "dctp",
+ 676: "vpps-via",
+ 677: "vpp",
+ 678: "ggf-ncp",
+ 679: "mrm",
+ 680: "entrust-aaas",
+ 681: "entrust-aams",
+ 682: "xfr",
+ 683: "corba-iiop",
+ 684: "corba-iiop-ssl",
+ 685: "mdc-portmapper",
+ 686: "hcp-wismar",
+ 687: "asipregistry",
+ 688: "realm-rusd",
+ 689: "nmap",
+ 690: "vatp",
+ 691: "msexch-routing",
+ 692: "hyperwave-isp",
+ 693: "connendp",
+ 694: "ha-cluster",
+ 695: "ieee-mms-ssl",
+ 696: "rushd",
+ 697: "uuidgen",
+ 698: "olsr",
+ 699: "accessnetwork",
+ 700: "epp",
+ 701: "lmp",
+ 702: "iris-beep",
+ 704: "elcsd",
+ 705: "agentx",
+ 706: "silc",
+ 707: "borland-dsj",
+ 709: "entrust-kmsh",
+ 710: "entrust-ash",
+ 711: "cisco-tdp",
+ 712: "tbrpf",
+ 713: "iris-xpc",
+ 714: "iris-xpcs",
+ 715: "iris-lwz",
+ 716: "pana",
+ 729: "netviewdm1",
+ 730: "netviewdm2",
+ 731: "netviewdm3",
+ 741: "netgw",
+ 742: "netrcs",
+ 744: "flexlm",
+ 747: "fujitsu-dev",
+ 748: "ris-cm",
+ 749: "kerberos-adm",
+ 750: "loadav",
+ 751: "pump",
+ 752: "qrh",
+ 753: "rrh",
+ 754: "tell",
+ 758: "nlogin",
+ 759: "con",
+ 760: "ns",
+ 761: "rxe",
+ 762: "quotad",
+ 763: "cycleserv",
+ 764: "omserv",
+ 765: "webster",
+ 767: "phonebook",
+ 769: "vid",
+ 770: "cadlock",
+ 771: "rtip",
+ 772: "cycleserv2",
+ 773: "notify",
+ 774: "acmaint-dbd",
+ 775: "acmaint-transd",
+ 776: "wpages",
+ 777: "multiling-http",
+ 780: "wpgs",
+ 800: "mdbs-daemon",
+ 801: "device",
+ 802: "mbap-s",
+ 810: "fcp-udp",
+ 828: "itm-mcell-s",
+ 829: "pkix-3-ca-ra",
+ 830: "netconf-ssh",
+ 831: "netconf-beep",
+ 832: "netconfsoaphttp",
+ 833: "netconfsoapbeep",
+ 847: "dhcp-failover2",
+ 848: "gdoi",
+ 853: "domain-s",
+ 854: "dlep",
+ 860: "iscsi",
+ 861: "owamp-control",
+ 862: "twamp-control",
+ 873: "rsync",
+ 886: "iclcnet-locate",
+ 887: "iclcnet-svinfo",
+ 888: "accessbuilder",
+ 900: "omginitialrefs",
+ 901: "smpnameres",
+ 902: "ideafarm-door",
+ 903: "ideafarm-panic",
+ 910: "kink",
+ 911: "xact-backup",
+ 912: "apex-mesh",
+ 913: "apex-edge",
+ 989: "ftps-data",
+ 990: "ftps",
+ 991: "nas",
+ 992: "telnets",
+ 993: "imaps",
+ 995: "pop3s",
+ 996: "vsinet",
+ 997: "maitrd",
+ 998: "puparp",
+ 999: "applix",
+ 1000: "cadlock2",
+ 1010: "surf",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1025: "blackjack",
+ 1026: "cap",
+ 1027: "6a44",
+ 1029: "solid-mux",
+ 1033: "netinfo-local",
+ 1034: "activesync",
+ 1035: "mxxrlogin",
+ 1036: "nsstp",
+ 1037: "ams",
+ 1038: "mtqp",
+ 1039: "sbl",
+ 1040: "netarx",
+ 1041: "danf-ak2",
+ 1042: "afrog",
+ 1043: "boinc-client",
+ 1044: "dcutility",
+ 1045: "fpitp",
+ 1046: "wfremotertm",
+ 1047: "neod1",
+ 1048: "neod2",
+ 1049: "td-postman",
+ 1050: "cma",
+ 1051: "optima-vnet",
+ 1052: "ddt",
+ 1053: "remote-as",
+ 1054: "brvread",
+ 1055: "ansyslmd",
+ 1056: "vfo",
+ 1057: "startron",
+ 1058: "nim",
+ 1059: "nimreg",
+ 1060: "polestar",
+ 1061: "kiosk",
+ 1062: "veracity",
+ 1063: "kyoceranetdev",
+ 1064: "jstel",
+ 1065: "syscomlan",
+ 1066: "fpo-fns",
+ 1067: "instl-boots",
+ 1068: "instl-bootc",
+ 1069: "cognex-insight",
+ 1070: "gmrupdateserv",
+ 1071: "bsquare-voip",
+ 1072: "cardax",
+ 1073: "bridgecontrol",
+ 1074: "warmspotMgmt",
+ 1075: "rdrmshc",
+ 1076: "dab-sti-c",
+ 1077: "imgames",
+ 1078: "avocent-proxy",
+ 1079: "asprovatalk",
+ 1080: "socks",
+ 1081: "pvuniwien",
+ 1082: "amt-esd-prot",
+ 1083: "ansoft-lm-1",
+ 1084: "ansoft-lm-2",
+ 1085: "webobjects",
+ 1086: "cplscrambler-lg",
+ 1087: "cplscrambler-in",
+ 1088: "cplscrambler-al",
+ 1089: "ff-annunc",
+ 1090: "ff-fms",
+ 1091: "ff-sm",
+ 1092: "obrpd",
+ 1093: "proofd",
+ 1094: "rootd",
+ 1095: "nicelink",
+ 1096: "cnrprotocol",
+ 1097: "sunclustermgr",
+ 1098: "rmiactivation",
+ 1099: "rmiregistry",
+ 1100: "mctp",
+ 1101: "pt2-discover",
+ 1102: "adobeserver-1",
+ 1103: "adobeserver-2",
+ 1104: "xrl",
+ 1105: "ftranhc",
+ 1106: "isoipsigport-1",
+ 1107: "isoipsigport-2",
+ 1108: "ratio-adp",
+ 1110: "nfsd-keepalive",
+ 1111: "lmsocialserver",
+ 1112: "icp",
+ 1113: "ltp-deepspace",
+ 1114: "mini-sql",
+ 1115: "ardus-trns",
+ 1116: "ardus-cntl",
+ 1117: "ardus-mtrns",
+ 1118: "sacred",
+ 1119: "bnetgame",
+ 1120: "bnetfile",
+ 1121: "rmpp",
+ 1122: "availant-mgr",
+ 1123: "murray",
+ 1124: "hpvmmcontrol",
+ 1125: "hpvmmagent",
+ 1126: "hpvmmdata",
+ 1127: "kwdb-commn",
+ 1128: "saphostctrl",
+ 1129: "saphostctrls",
+ 1130: "casp",
+ 1131: "caspssl",
+ 1132: "kvm-via-ip",
+ 1133: "dfn",
+ 1134: "aplx",
+ 1135: "omnivision",
+ 1136: "hhb-gateway",
+ 1137: "trim",
+ 1138: "encrypted-admin",
+ 1139: "evm",
+ 1140: "autonoc",
+ 1141: "mxomss",
+ 1142: "edtools",
+ 1143: "imyx",
+ 1144: "fuscript",
+ 1145: "x9-icue",
+ 1146: "audit-transfer",
+ 1147: "capioverlan",
+ 1148: "elfiq-repl",
+ 1149: "bvtsonar",
+ 1150: "blaze",
+ 1151: "unizensus",
+ 1152: "winpoplanmess",
+ 1153: "c1222-acse",
+ 1154: "resacommunity",
+ 1155: "nfa",
+ 1156: "iascontrol-oms",
+ 1157: "iascontrol",
+ 1158: "dbcontrol-oms",
+ 1159: "oracle-oms",
+ 1160: "olsv",
+ 1161: "health-polling",
+ 1162: "health-trap",
+ 1163: "sddp",
+ 1164: "qsm-proxy",
+ 1165: "qsm-gui",
+ 1166: "qsm-remote",
+ 1167: "cisco-ipsla",
+ 1168: "vchat",
+ 1169: "tripwire",
+ 1170: "atc-lm",
+ 1171: "atc-appserver",
+ 1172: "dnap",
+ 1173: "d-cinema-rrp",
+ 1174: "fnet-remote-ui",
+ 1175: "dossier",
+ 1176: "indigo-server",
+ 1177: "dkmessenger",
+ 1178: "sgi-storman",
+ 1179: "b2n",
+ 1180: "mc-client",
+ 1181: "3comnetman",
+ 1182: "accelenet-data",
+ 1183: "llsurfup-http",
+ 1184: "llsurfup-https",
+ 1185: "catchpole",
+ 1186: "mysql-cluster",
+ 1187: "alias",
+ 1188: "hp-webadmin",
+ 1189: "unet",
+ 1190: "commlinx-avl",
+ 1191: "gpfs",
+ 1192: "caids-sensor",
+ 1193: "fiveacross",
+ 1194: "openvpn",
+ 1195: "rsf-1",
+ 1196: "netmagic",
+ 1197: "carrius-rshell",
+ 1198: "cajo-discovery",
+ 1199: "dmidi",
+ 1200: "scol",
+ 1201: "nucleus-sand",
+ 1202: "caiccipc",
+ 1203: "ssslic-mgr",
+ 1204: "ssslog-mgr",
+ 1205: "accord-mgc",
+ 1206: "anthony-data",
+ 1207: "metasage",
+ 1208: "seagull-ais",
+ 1209: "ipcd3",
+ 1210: "eoss",
+ 1211: "groove-dpp",
+ 1212: "lupa",
+ 1213: "mpc-lifenet",
+ 1214: "kazaa",
+ 1215: "scanstat-1",
+ 1216: "etebac5",
+ 1217: "hpss-ndapi",
+ 1218: "aeroflight-ads",
+ 1219: "aeroflight-ret",
+ 1220: "qt-serveradmin",
+ 1221: "sweetware-apps",
+ 1222: "nerv",
+ 1223: "tgp",
+ 1224: "vpnz",
+ 1225: "slinkysearch",
+ 1226: "stgxfws",
+ 1227: "dns2go",
+ 1228: "florence",
+ 1229: "zented",
+ 1230: "periscope",
+ 1231: "menandmice-lpm",
+ 1232: "first-defense",
+ 1233: "univ-appserver",
+ 1234: "search-agent",
+ 1235: "mosaicsyssvc1",
+ 1236: "bvcontrol",
+ 1237: "tsdos390",
+ 1238: "hacl-qs",
+ 1239: "nmsd",
+ 1240: "instantia",
+ 1241: "nessus",
+ 1242: "nmasoverip",
+ 1243: "serialgateway",
+ 1244: "isbconference1",
+ 1245: "isbconference2",
+ 1246: "payrouter",
+ 1247: "visionpyramid",
+ 1248: "hermes",
+ 1249: "mesavistaco",
+ 1250: "swldy-sias",
+ 1251: "servergraph",
+ 1252: "bspne-pcc",
+ 1253: "q55-pcc",
+ 1254: "de-noc",
+ 1255: "de-cache-query",
+ 1256: "de-server",
+ 1257: "shockwave2",
+ 1258: "opennl",
+ 1259: "opennl-voice",
+ 1260: "ibm-ssd",
+ 1261: "mpshrsv",
+ 1262: "qnts-orb",
+ 1263: "dka",
+ 1264: "prat",
+ 1265: "dssiapi",
+ 1266: "dellpwrappks",
+ 1267: "epc",
+ 1268: "propel-msgsys",
+ 1269: "watilapp",
+ 1270: "opsmgr",
+ 1271: "excw",
+ 1272: "cspmlockmgr",
+ 1273: "emc-gateway",
+ 1274: "t1distproc",
+ 1275: "ivcollector",
+ 1277: "miva-mqs",
+ 1278: "dellwebadmin-1",
+ 1279: "dellwebadmin-2",
+ 1280: "pictrography",
+ 1281: "healthd",
+ 1282: "emperion",
+ 1283: "productinfo",
+ 1284: "iee-qfx",
+ 1285: "neoiface",
+ 1286: "netuitive",
+ 1287: "routematch",
+ 1288: "navbuddy",
+ 1289: "jwalkserver",
+ 1290: "winjaserver",
+ 1291: "seagulllms",
+ 1292: "dsdn",
+ 1293: "pkt-krb-ipsec",
+ 1294: "cmmdriver",
+ 1295: "ehtp",
+ 1296: "dproxy",
+ 1297: "sdproxy",
+ 1298: "lpcp",
+ 1299: "hp-sci",
+ 1300: "h323hostcallsc",
+ 1301: "ci3-software-1",
+ 1302: "ci3-software-2",
+ 1303: "sftsrv",
+ 1304: "boomerang",
+ 1305: "pe-mike",
+ 1306: "re-conn-proto",
+ 1307: "pacmand",
+ 1308: "odsi",
+ 1309: "jtag-server",
+ 1310: "husky",
+ 1311: "rxmon",
+ 1312: "sti-envision",
+ 1313: "bmc-patroldb",
+ 1314: "pdps",
+ 1315: "els",
+ 1316: "exbit-escp",
+ 1317: "vrts-ipcserver",
+ 1318: "krb5gatekeeper",
+ 1319: "amx-icsp",
+ 1320: "amx-axbnet",
+ 1321: "pip",
+ 1322: "novation",
+ 1323: "brcd",
+ 1324: "delta-mcp",
+ 1325: "dx-instrument",
+ 1326: "wimsic",
+ 1327: "ultrex",
+ 1328: "ewall",
+ 1329: "netdb-export",
+ 1330: "streetperfect",
+ 1331: "intersan",
+ 1332: "pcia-rxp-b",
+ 1333: "passwrd-policy",
+ 1334: "writesrv",
+ 1335: "digital-notary",
+ 1336: "ischat",
+ 1337: "menandmice-dns",
+ 1338: "wmc-log-svc",
+ 1339: "kjtsiteserver",
+ 1340: "naap",
+ 1341: "qubes",
+ 1342: "esbroker",
+ 1343: "re101",
+ 1344: "icap",
+ 1345: "vpjp",
+ 1346: "alta-ana-lm",
+ 1347: "bbn-mmc",
+ 1348: "bbn-mmx",
+ 1349: "sbook",
+ 1350: "editbench",
+ 1351: "equationbuilder",
+ 1352: "lotusnote",
+ 1353: "relief",
+ 1354: "XSIP-network",
+ 1355: "intuitive-edge",
+ 1356: "cuillamartin",
+ 1357: "pegboard",
+ 1358: "connlcli",
+ 1359: "ftsrv",
+ 1360: "mimer",
+ 1361: "linx",
+ 1362: "timeflies",
+ 1363: "ndm-requester",
+ 1364: "ndm-server",
+ 1365: "adapt-sna",
+ 1366: "netware-csp",
+ 1367: "dcs",
+ 1368: "screencast",
+ 1369: "gv-us",
+ 1370: "us-gv",
+ 1371: "fc-cli",
+ 1372: "fc-ser",
+ 1373: "chromagrafx",
+ 1374: "molly",
+ 1375: "bytex",
+ 1376: "ibm-pps",
+ 1377: "cichlid",
+ 1378: "elan",
+ 1379: "dbreporter",
+ 1380: "telesis-licman",
+ 1381: "apple-licman",
+ 1382: "udt-os",
+ 1383: "gwha",
+ 1384: "os-licman",
+ 1385: "atex-elmd",
+ 1386: "checksum",
+ 1387: "cadsi-lm",
+ 1388: "objective-dbc",
+ 1389: "iclpv-dm",
+ 1390: "iclpv-sc",
+ 1391: "iclpv-sas",
+ 1392: "iclpv-pm",
+ 1393: "iclpv-nls",
+ 1394: "iclpv-nlc",
+ 1395: "iclpv-wsm",
+ 1396: "dvl-activemail",
+ 1397: "audio-activmail",
+ 1398: "video-activmail",
+ 1399: "cadkey-licman",
+ 1400: "cadkey-tablet",
+ 1401: "goldleaf-licman",
+ 1402: "prm-sm-np",
+ 1403: "prm-nm-np",
+ 1404: "igi-lm",
+ 1405: "ibm-res",
+ 1406: "netlabs-lm",
+ 1408: "sophia-lm",
+ 1409: "here-lm",
+ 1410: "hiq",
+ 1411: "af",
+ 1412: "innosys",
+ 1413: "innosys-acl",
+ 1414: "ibm-mqseries",
+ 1415: "dbstar",
+ 1416: "novell-lu6-2",
+ 1417: "timbuktu-srv1",
+ 1418: "timbuktu-srv2",
+ 1419: "timbuktu-srv3",
+ 1420: "timbuktu-srv4",
+ 1421: "gandalf-lm",
+ 1422: "autodesk-lm",
+ 1423: "essbase",
+ 1424: "hybrid",
+ 1425: "zion-lm",
+ 1426: "sais",
+ 1427: "mloadd",
+ 1428: "informatik-lm",
+ 1429: "nms",
+ 1430: "tpdu",
+ 1431: "rgtp",
+ 1432: "blueberry-lm",
+ 1433: "ms-sql-s",
+ 1434: "ms-sql-m",
+ 1435: "ibm-cics",
+ 1436: "saism",
+ 1437: "tabula",
+ 1438: "eicon-server",
+ 1439: "eicon-x25",
+ 1440: "eicon-slp",
+ 1441: "cadis-1",
+ 1442: "cadis-2",
+ 1443: "ies-lm",
+ 1444: "marcam-lm",
+ 1445: "proxima-lm",
+ 1446: "ora-lm",
+ 1447: "apri-lm",
+ 1448: "oc-lm",
+ 1449: "peport",
+ 1450: "dwf",
+ 1451: "infoman",
+ 1452: "gtegsc-lm",
+ 1453: "genie-lm",
+ 1454: "interhdl-elmd",
+ 1455: "esl-lm",
+ 1456: "dca",
+ 1457: "valisys-lm",
+ 1458: "nrcabq-lm",
+ 1459: "proshare1",
+ 1460: "proshare2",
+ 1461: "ibm-wrless-lan",
+ 1462: "world-lm",
+ 1463: "nucleus",
+ 1464: "msl-lmd",
+ 1465: "pipes",
+ 1466: "oceansoft-lm",
+ 1467: "csdmbase",
+ 1468: "csdm",
+ 1469: "aal-lm",
+ 1470: "uaiact",
+ 1471: "csdmbase",
+ 1472: "csdm",
+ 1473: "openmath",
+ 1474: "telefinder",
+ 1475: "taligent-lm",
+ 1476: "clvm-cfg",
+ 1477: "ms-sna-server",
+ 1478: "ms-sna-base",
+ 1479: "dberegister",
+ 1480: "pacerforum",
+ 1481: "airs",
+ 1482: "miteksys-lm",
+ 1483: "afs",
+ 1484: "confluent",
+ 1485: "lansource",
+ 1486: "nms-topo-serv",
+ 1487: "localinfosrvr",
+ 1488: "docstor",
+ 1489: "dmdocbroker",
+ 1490: "insitu-conf",
+ 1492: "stone-design-1",
+ 1493: "netmap-lm",
+ 1494: "ica",
+ 1495: "cvc",
+ 1496: "liberty-lm",
+ 1497: "rfx-lm",
+ 1498: "sybase-sqlany",
+ 1499: "fhc",
+ 1500: "vlsi-lm",
+ 1501: "saiscm",
+ 1502: "shivadiscovery",
+ 1503: "imtc-mcs",
+ 1504: "evb-elm",
+ 1505: "funkproxy",
+ 1506: "utcd",
+ 1507: "symplex",
+ 1508: "diagmond",
+ 1509: "robcad-lm",
+ 1510: "mvx-lm",
+ 1511: "3l-l1",
+ 1512: "wins",
+ 1513: "fujitsu-dtc",
+ 1514: "fujitsu-dtcns",
+ 1515: "ifor-protocol",
+ 1516: "vpad",
+ 1517: "vpac",
+ 1518: "vpvd",
+ 1519: "vpvc",
+ 1520: "atm-zip-office",
+ 1521: "ncube-lm",
+ 1522: "ricardo-lm",
+ 1523: "cichild-lm",
+ 1524: "ingreslock",
+ 1525: "orasrv",
+ 1526: "pdap-np",
+ 1527: "tlisrv",
+ 1528: "ngr-t",
+ 1529: "coauthor",
+ 1530: "rap-service",
+ 1531: "rap-listen",
+ 1532: "miroconnect",
+ 1533: "virtual-places",
+ 1534: "micromuse-lm",
+ 1535: "ampr-info",
+ 1536: "ampr-inter",
+ 1537: "sdsc-lm",
+ 1538: "3ds-lm",
+ 1539: "intellistor-lm",
+ 1540: "rds",
+ 1541: "rds2",
+ 1542: "gridgen-elmd",
+ 1543: "simba-cs",
+ 1544: "aspeclmd",
+ 1545: "vistium-share",
+ 1546: "abbaccuray",
+ 1547: "laplink",
+ 1548: "axon-lm",
+ 1549: "shivasound",
+ 1550: "3m-image-lm",
+ 1551: "hecmtl-db",
+ 1552: "pciarray",
+ 1553: "sna-cs",
+ 1554: "caci-lm",
+ 1555: "livelan",
+ 1556: "veritas-pbx",
+ 1557: "arbortext-lm",
+ 1558: "xingmpeg",
+ 1559: "web2host",
+ 1560: "asci-val",
+ 1561: "facilityview",
+ 1562: "pconnectmgr",
+ 1563: "cadabra-lm",
+ 1564: "pay-per-view",
+ 1565: "winddlb",
+ 1566: "corelvideo",
+ 1567: "jlicelmd",
+ 1568: "tsspmap",
+ 1569: "ets",
+ 1570: "orbixd",
+ 1571: "rdb-dbs-disp",
+ 1572: "chip-lm",
+ 1573: "itscomm-ns",
+ 1574: "mvel-lm",
+ 1575: "oraclenames",
+ 1576: "moldflow-lm",
+ 1577: "hypercube-lm",
+ 1578: "jacobus-lm",
+ 1579: "ioc-sea-lm",
+ 1580: "tn-tl-r2",
+ 1581: "mil-2045-47001",
+ 1582: "msims",
+ 1583: "simbaexpress",
+ 1584: "tn-tl-fd2",
+ 1585: "intv",
+ 1586: "ibm-abtact",
+ 1587: "pra-elmd",
+ 1588: "triquest-lm",
+ 1589: "vqp",
+ 1590: "gemini-lm",
+ 1591: "ncpm-pm",
+ 1592: "commonspace",
+ 1593: "mainsoft-lm",
+ 1594: "sixtrak",
+ 1595: "radio",
+ 1596: "radio-bc",
+ 1597: "orbplus-iiop",
+ 1598: "picknfs",
+ 1599: "simbaservices",
+ 1600: "issd",
+ 1601: "aas",
+ 1602: "inspect",
+ 1603: "picodbc",
+ 1604: "icabrowser",
+ 1605: "slp",
+ 1606: "slm-api",
+ 1607: "stt",
+ 1608: "smart-lm",
+ 1609: "isysg-lm",
+ 1610: "taurus-wh",
+ 1611: "ill",
+ 1612: "netbill-trans",
+ 1613: "netbill-keyrep",
+ 1614: "netbill-cred",
+ 1615: "netbill-auth",
+ 1616: "netbill-prod",
+ 1617: "nimrod-agent",
+ 1618: "skytelnet",
+ 1619: "xs-openstorage",
+ 1620: "faxportwinport",
+ 1621: "softdataphone",
+ 1622: "ontime",
+ 1623: "jaleosnd",
+ 1624: "udp-sr-port",
+ 1625: "svs-omagent",
+ 1626: "shockwave",
+ 1627: "t128-gateway",
+ 1628: "lontalk-norm",
+ 1629: "lontalk-urgnt",
+ 1630: "oraclenet8cman",
+ 1631: "visitview",
+ 1632: "pammratc",
+ 1633: "pammrpc",
+ 1634: "loaprobe",
+ 1635: "edb-server1",
+ 1636: "isdc",
+ 1637: "islc",
+ 1638: "ismc",
+ 1639: "cert-initiator",
+ 1640: "cert-responder",
+ 1641: "invision",
+ 1642: "isis-am",
+ 1643: "isis-ambc",
+ 1644: "saiseh",
+ 1645: "sightline",
+ 1646: "sa-msg-port",
+ 1647: "rsap",
+ 1648: "concurrent-lm",
+ 1649: "kermit",
+ 1650: "nkd",
+ 1651: "shiva-confsrvr",
+ 1652: "xnmp",
+ 1653: "alphatech-lm",
+ 1654: "stargatealerts",
+ 1655: "dec-mbadmin",
+ 1656: "dec-mbadmin-h",
+ 1657: "fujitsu-mmpdc",
+ 1658: "sixnetudr",
+ 1659: "sg-lm",
+ 1660: "skip-mc-gikreq",
+ 1661: "netview-aix-1",
+ 1662: "netview-aix-2",
+ 1663: "netview-aix-3",
+ 1664: "netview-aix-4",
+ 1665: "netview-aix-5",
+ 1666: "netview-aix-6",
+ 1667: "netview-aix-7",
+ 1668: "netview-aix-8",
+ 1669: "netview-aix-9",
+ 1670: "netview-aix-10",
+ 1671: "netview-aix-11",
+ 1672: "netview-aix-12",
+ 1673: "proshare-mc-1",
+ 1674: "proshare-mc-2",
+ 1675: "pdp",
+ 1676: "netcomm2",
+ 1677: "groupwise",
+ 1678: "prolink",
+ 1679: "darcorp-lm",
+ 1680: "microcom-sbp",
+ 1681: "sd-elmd",
+ 1682: "lanyon-lantern",
+ 1683: "ncpm-hip",
+ 1684: "snaresecure",
+ 1685: "n2nremote",
+ 1686: "cvmon",
+ 1687: "nsjtp-ctrl",
+ 1688: "nsjtp-data",
+ 1689: "firefox",
+ 1690: "ng-umds",
+ 1691: "empire-empuma",
+ 1692: "sstsys-lm",
+ 1693: "rrirtr",
+ 1694: "rrimwm",
+ 1695: "rrilwm",
+ 1696: "rrifmm",
+ 1697: "rrisat",
+ 1698: "rsvp-encap-1",
+ 1699: "rsvp-encap-2",
+ 1700: "mps-raft",
+ 1701: "l2f",
+ 1702: "deskshare",
+ 1703: "hb-engine",
+ 1704: "bcs-broker",
+ 1705: "slingshot",
+ 1706: "jetform",
+ 1707: "vdmplay",
+ 1708: "gat-lmd",
+ 1709: "centra",
+ 1710: "impera",
+ 1711: "pptconference",
+ 1712: "registrar",
+ 1713: "conferencetalk",
+ 1714: "sesi-lm",
+ 1715: "houdini-lm",
+ 1716: "xmsg",
+ 1717: "fj-hdnet",
+ 1718: "h323gatedisc",
+ 1719: "h323gatestat",
+ 1720: "h323hostcall",
+ 1721: "caicci",
+ 1722: "hks-lm",
+ 1723: "pptp",
+ 1724: "csbphonemaster",
+ 1725: "iden-ralp",
+ 1726: "iberiagames",
+ 1727: "winddx",
+ 1728: "telindus",
+ 1729: "citynl",
+ 1730: "roketz",
+ 1731: "msiccp",
+ 1732: "proxim",
+ 1733: "siipat",
+ 1734: "cambertx-lm",
+ 1735: "privatechat",
+ 1736: "street-stream",
+ 1737: "ultimad",
+ 1738: "gamegen1",
+ 1739: "webaccess",
+ 1740: "encore",
+ 1741: "cisco-net-mgmt",
+ 1742: "3Com-nsd",
+ 1743: "cinegrfx-lm",
+ 1744: "ncpm-ft",
+ 1745: "remote-winsock",
+ 1746: "ftrapid-1",
+ 1747: "ftrapid-2",
+ 1748: "oracle-em1",
+ 1749: "aspen-services",
+ 1750: "sslp",
+ 1751: "swiftnet",
+ 1752: "lofr-lm",
+ 1754: "oracle-em2",
+ 1755: "ms-streaming",
+ 1756: "capfast-lmd",
+ 1757: "cnhrp",
+ 1758: "tftp-mcast",
+ 1759: "spss-lm",
+ 1760: "www-ldap-gw",
+ 1761: "cft-0",
+ 1762: "cft-1",
+ 1763: "cft-2",
+ 1764: "cft-3",
+ 1765: "cft-4",
+ 1766: "cft-5",
+ 1767: "cft-6",
+ 1768: "cft-7",
+ 1769: "bmc-net-adm",
+ 1770: "bmc-net-svc",
+ 1771: "vaultbase",
+ 1772: "essweb-gw",
+ 1773: "kmscontrol",
+ 1774: "global-dtserv",
+ 1776: "femis",
+ 1777: "powerguardian",
+ 1778: "prodigy-intrnet",
+ 1779: "pharmasoft",
+ 1780: "dpkeyserv",
+ 1781: "answersoft-lm",
+ 1782: "hp-hcip",
+ 1784: "finle-lm",
+ 1785: "windlm",
+ 1786: "funk-logger",
+ 1787: "funk-license",
+ 1788: "psmond",
+ 1789: "hello",
+ 1790: "nmsp",
+ 1791: "ea1",
+ 1792: "ibm-dt-2",
+ 1793: "rsc-robot",
+ 1794: "cera-bcm",
+ 1795: "dpi-proxy",
+ 1796: "vocaltec-admin",
+ 1797: "uma",
+ 1798: "etp",
+ 1799: "netrisk",
+ 1800: "ansys-lm",
+ 1801: "msmq",
+ 1802: "concomp1",
+ 1803: "hp-hcip-gwy",
+ 1804: "enl",
+ 1805: "enl-name",
+ 1806: "musiconline",
+ 1807: "fhsp",
+ 1808: "oracle-vp2",
+ 1809: "oracle-vp1",
+ 1810: "jerand-lm",
+ 1811: "scientia-sdb",
+ 1812: "radius",
+ 1813: "radius-acct",
+ 1814: "tdp-suite",
+ 1815: "mmpft",
+ 1816: "harp",
+ 1817: "rkb-oscs",
+ 1818: "etftp",
+ 1819: "plato-lm",
+ 1820: "mcagent",
+ 1821: "donnyworld",
+ 1822: "es-elmd",
+ 1823: "unisys-lm",
+ 1824: "metrics-pas",
+ 1825: "direcpc-video",
+ 1826: "ardt",
+ 1827: "asi",
+ 1828: "itm-mcell-u",
+ 1829: "optika-emedia",
+ 1830: "net8-cman",
+ 1831: "myrtle",
+ 1832: "tht-treasure",
+ 1833: "udpradio",
+ 1834: "ardusuni",
+ 1835: "ardusmul",
+ 1836: "ste-smsc",
+ 1837: "csoft1",
+ 1838: "talnet",
+ 1839: "netopia-vo1",
+ 1840: "netopia-vo2",
+ 1841: "netopia-vo3",
+ 1842: "netopia-vo4",
+ 1843: "netopia-vo5",
+ 1844: "direcpc-dll",
+ 1845: "altalink",
+ 1846: "tunstall-pnc",
+ 1847: "slp-notify",
+ 1848: "fjdocdist",
+ 1849: "alpha-sms",
+ 1850: "gsi",
+ 1851: "ctcd",
+ 1852: "virtual-time",
+ 1853: "vids-avtp",
+ 1854: "buddy-draw",
+ 1855: "fiorano-rtrsvc",
+ 1856: "fiorano-msgsvc",
+ 1857: "datacaptor",
+ 1858: "privateark",
+ 1859: "gammafetchsvr",
+ 1860: "sunscalar-svc",
+ 1861: "lecroy-vicp",
+ 1862: "mysql-cm-agent",
+ 1863: "msnp",
+ 1864: "paradym-31port",
+ 1865: "entp",
+ 1866: "swrmi",
+ 1867: "udrive",
+ 1868: "viziblebrowser",
+ 1869: "transact",
+ 1870: "sunscalar-dns",
+ 1871: "canocentral0",
+ 1872: "canocentral1",
+ 1873: "fjmpjps",
+ 1874: "fjswapsnp",
+ 1875: "westell-stats",
+ 1876: "ewcappsrv",
+ 1877: "hp-webqosdb",
+ 1878: "drmsmc",
+ 1879: "nettgain-nms",
+ 1880: "vsat-control",
+ 1881: "ibm-mqseries2",
+ 1882: "ecsqdmn",
+ 1883: "mqtt",
+ 1884: "idmaps",
+ 1885: "vrtstrapserver",
+ 1886: "leoip",
+ 1887: "filex-lport",
+ 1888: "ncconfig",
+ 1889: "unify-adapter",
+ 1890: "wilkenlistener",
+ 1891: "childkey-notif",
+ 1892: "childkey-ctrl",
+ 1893: "elad",
+ 1894: "o2server-port",
+ 1896: "b-novative-ls",
+ 1897: "metaagent",
+ 1898: "cymtec-port",
+ 1899: "mc2studios",
+ 1900: "ssdp",
+ 1901: "fjicl-tep-a",
+ 1902: "fjicl-tep-b",
+ 1903: "linkname",
+ 1904: "fjicl-tep-c",
+ 1905: "sugp",
+ 1906: "tpmd",
+ 1907: "intrastar",
+ 1908: "dawn",
+ 1909: "global-wlink",
+ 1910: "ultrabac",
+ 1911: "mtp",
+ 1912: "rhp-iibp",
+ 1913: "armadp",
+ 1914: "elm-momentum",
+ 1915: "facelink",
+ 1916: "persona",
+ 1917: "noagent",
+ 1918: "can-nds",
+ 1919: "can-dch",
+ 1920: "can-ferret",
+ 1921: "noadmin",
+ 1922: "tapestry",
+ 1923: "spice",
+ 1924: "xiip",
+ 1925: "discovery-port",
+ 1926: "egs",
+ 1927: "videte-cipc",
+ 1928: "emsd-port",
+ 1929: "bandwiz-system",
+ 1930: "driveappserver",
+ 1931: "amdsched",
+ 1932: "ctt-broker",
+ 1933: "xmapi",
+ 1934: "xaapi",
+ 1935: "macromedia-fcs",
+ 1936: "jetcmeserver",
+ 1937: "jwserver",
+ 1938: "jwclient",
+ 1939: "jvserver",
+ 1940: "jvclient",
+ 1941: "dic-aida",
+ 1942: "res",
+ 1943: "beeyond-media",
+ 1944: "close-combat",
+ 1945: "dialogic-elmd",
+ 1946: "tekpls",
+ 1947: "sentinelsrm",
+ 1948: "eye2eye",
+ 1949: "ismaeasdaqlive",
+ 1950: "ismaeasdaqtest",
+ 1951: "bcs-lmserver",
+ 1952: "mpnjsc",
+ 1953: "rapidbase",
+ 1954: "abr-api",
+ 1955: "abr-secure",
+ 1956: "vrtl-vmf-ds",
+ 1957: "unix-status",
+ 1958: "dxadmind",
+ 1959: "simp-all",
+ 1960: "nasmanager",
+ 1961: "bts-appserver",
+ 1962: "biap-mp",
+ 1963: "webmachine",
+ 1964: "solid-e-engine",
+ 1965: "tivoli-npm",
+ 1966: "slush",
+ 1967: "sns-quote",
+ 1968: "lipsinc",
+ 1969: "lipsinc1",
+ 1970: "netop-rc",
+ 1971: "netop-school",
+ 1972: "intersys-cache",
+ 1973: "dlsrap",
+ 1974: "drp",
+ 1975: "tcoflashagent",
+ 1976: "tcoregagent",
+ 1977: "tcoaddressbook",
+ 1978: "unisql",
+ 1979: "unisql-java",
+ 1980: "pearldoc-xact",
+ 1981: "p2pq",
+ 1982: "estamp",
+ 1983: "lhtp",
+ 1984: "bb",
+ 1985: "hsrp",
+ 1986: "licensedaemon",
+ 1987: "tr-rsrb-p1",
+ 1988: "tr-rsrb-p2",
+ 1989: "tr-rsrb-p3",
+ 1990: "stun-p1",
+ 1991: "stun-p2",
+ 1992: "stun-p3",
+ 1993: "snmp-tcp-port",
+ 1994: "stun-port",
+ 1995: "perf-port",
+ 1996: "tr-rsrb-port",
+ 1997: "gdp-port",
+ 1998: "x25-svc-port",
+ 1999: "tcp-id-port",
+ 2000: "cisco-sccp",
+ 2001: "wizard",
+ 2002: "globe",
+ 2003: "brutus",
+ 2004: "emce",
+ 2005: "oracle",
+ 2006: "raid-cd",
+ 2007: "raid-am",
+ 2008: "terminaldb",
+ 2009: "whosockami",
+ 2010: "pipe-server",
+ 2011: "servserv",
+ 2012: "raid-ac",
+ 2013: "raid-cd",
+ 2014: "raid-sf",
+ 2015: "raid-cs",
+ 2016: "bootserver",
+ 2017: "bootclient",
+ 2018: "rellpack",
+ 2019: "about",
+ 2020: "xinupageserver",
+ 2021: "xinuexpansion1",
+ 2022: "xinuexpansion2",
+ 2023: "xinuexpansion3",
+ 2024: "xinuexpansion4",
+ 2025: "xribs",
+ 2026: "scrabble",
+ 2027: "shadowserver",
+ 2028: "submitserver",
+ 2029: "hsrpv6",
+ 2030: "device2",
+ 2031: "mobrien-chat",
+ 2032: "blackboard",
+ 2033: "glogger",
+ 2034: "scoremgr",
+ 2035: "imsldoc",
+ 2036: "e-dpnet",
+ 2037: "applus",
+ 2038: "objectmanager",
+ 2039: "prizma",
+ 2040: "lam",
+ 2041: "interbase",
+ 2042: "isis",
+ 2043: "isis-bcast",
+ 2044: "rimsl",
+ 2045: "cdfunc",
+ 2046: "sdfunc",
+ 2047: "dls",
+ 2048: "dls-monitor",
+ 2049: "shilp",
+ 2050: "av-emb-config",
+ 2051: "epnsdp",
+ 2052: "clearvisn",
+ 2053: "lot105-ds-upd",
+ 2054: "weblogin",
+ 2055: "iop",
+ 2056: "omnisky",
+ 2057: "rich-cp",
+ 2058: "newwavesearch",
+ 2059: "bmc-messaging",
+ 2060: "teleniumdaemon",
+ 2061: "netmount",
+ 2062: "icg-swp",
+ 2063: "icg-bridge",
+ 2064: "icg-iprelay",
+ 2065: "dlsrpn",
+ 2066: "aura",
+ 2067: "dlswpn",
+ 2068: "avauthsrvprtcl",
+ 2069: "event-port",
+ 2070: "ah-esp-encap",
+ 2071: "acp-port",
+ 2072: "msync",
+ 2073: "gxs-data-port",
+ 2074: "vrtl-vmf-sa",
+ 2075: "newlixengine",
+ 2076: "newlixconfig",
+ 2077: "tsrmagt",
+ 2078: "tpcsrvr",
+ 2079: "idware-router",
+ 2080: "autodesk-nlm",
+ 2081: "kme-trap-port",
+ 2082: "infowave",
+ 2083: "radsec",
+ 2084: "sunclustergeo",
+ 2085: "ada-cip",
+ 2086: "gnunet",
+ 2087: "eli",
+ 2088: "ip-blf",
+ 2089: "sep",
+ 2090: "lrp",
+ 2091: "prp",
+ 2092: "descent3",
+ 2093: "nbx-cc",
+ 2094: "nbx-au",
+ 2095: "nbx-ser",
+ 2096: "nbx-dir",
+ 2097: "jetformpreview",
+ 2098: "dialog-port",
+ 2099: "h2250-annex-g",
+ 2100: "amiganetfs",
+ 2101: "rtcm-sc104",
+ 2102: "zephyr-srv",
+ 2103: "zephyr-clt",
+ 2104: "zephyr-hm",
+ 2105: "minipay",
+ 2106: "mzap",
+ 2107: "bintec-admin",
+ 2108: "comcam",
+ 2109: "ergolight",
+ 2110: "umsp",
+ 2111: "dsatp",
+ 2112: "idonix-metanet",
+ 2113: "hsl-storm",
+ 2114: "newheights",
+ 2115: "kdm",
+ 2116: "ccowcmr",
+ 2117: "mentaclient",
+ 2118: "mentaserver",
+ 2119: "gsigatekeeper",
+ 2120: "qencp",
+ 2121: "scientia-ssdb",
+ 2122: "caupc-remote",
+ 2123: "gtp-control",
+ 2124: "elatelink",
+ 2125: "lockstep",
+ 2126: "pktcable-cops",
+ 2127: "index-pc-wb",
+ 2128: "net-steward",
+ 2129: "cs-live",
+ 2130: "xds",
+ 2131: "avantageb2b",
+ 2132: "solera-epmap",
+ 2133: "zymed-zpp",
+ 2134: "avenue",
+ 2135: "gris",
+ 2136: "appworxsrv",
+ 2137: "connect",
+ 2138: "unbind-cluster",
+ 2139: "ias-auth",
+ 2140: "ias-reg",
+ 2141: "ias-admind",
+ 2142: "tdmoip",
+ 2143: "lv-jc",
+ 2144: "lv-ffx",
+ 2145: "lv-pici",
+ 2146: "lv-not",
+ 2147: "lv-auth",
+ 2148: "veritas-ucl",
+ 2149: "acptsys",
+ 2150: "dynamic3d",
+ 2151: "docent",
+ 2152: "gtp-user",
+ 2153: "ctlptc",
+ 2154: "stdptc",
+ 2155: "brdptc",
+ 2156: "trp",
+ 2157: "xnds",
+ 2158: "touchnetplus",
+ 2159: "gdbremote",
+ 2160: "apc-2160",
+ 2161: "apc-2161",
+ 2162: "navisphere",
+ 2163: "navisphere-sec",
+ 2164: "ddns-v3",
+ 2165: "x-bone-api",
+ 2166: "iwserver",
+ 2167: "raw-serial",
+ 2168: "easy-soft-mux",
+ 2169: "brain",
+ 2170: "eyetv",
+ 2171: "msfw-storage",
+ 2172: "msfw-s-storage",
+ 2173: "msfw-replica",
+ 2174: "msfw-array",
+ 2175: "airsync",
+ 2176: "rapi",
+ 2177: "qwave",
+ 2178: "bitspeer",
+ 2179: "vmrdp",
+ 2180: "mc-gt-srv",
+ 2181: "eforward",
+ 2182: "cgn-stat",
+ 2183: "cgn-config",
+ 2184: "nvd",
+ 2185: "onbase-dds",
+ 2186: "gtaua",
+ 2187: "ssmd",
+ 2190: "tivoconnect",
+ 2191: "tvbus",
+ 2192: "asdis",
+ 2193: "drwcs",
+ 2197: "mnp-exchange",
+ 2198: "onehome-remote",
+ 2199: "onehome-help",
+ 2200: "ici",
+ 2201: "ats",
+ 2202: "imtc-map",
+ 2203: "b2-runtime",
+ 2204: "b2-license",
+ 2205: "jps",
+ 2206: "hpocbus",
+ 2207: "hpssd",
+ 2208: "hpiod",
+ 2209: "rimf-ps",
+ 2210: "noaaport",
+ 2211: "emwin",
+ 2212: "leecoposserver",
+ 2213: "kali",
+ 2214: "rpi",
+ 2215: "ipcore",
+ 2216: "vtu-comms",
+ 2217: "gotodevice",
+ 2218: "bounzza",
+ 2219: "netiq-ncap",
+ 2220: "netiq",
+ 2221: "ethernet-ip-s",
+ 2222: "EtherNet-IP-1",
+ 2223: "rockwell-csp2",
+ 2224: "efi-mg",
+ 2226: "di-drm",
+ 2227: "di-msg",
+ 2228: "ehome-ms",
+ 2229: "datalens",
+ 2230: "queueadm",
+ 2231: "wimaxasncp",
+ 2232: "ivs-video",
+ 2233: "infocrypt",
+ 2234: "directplay",
+ 2235: "sercomm-wlink",
+ 2236: "nani",
+ 2237: "optech-port1-lm",
+ 2238: "aviva-sna",
+ 2239: "imagequery",
+ 2240: "recipe",
+ 2241: "ivsd",
+ 2242: "foliocorp",
+ 2243: "magicom",
+ 2244: "nmsserver",
+ 2245: "hao",
+ 2246: "pc-mta-addrmap",
+ 2247: "antidotemgrsvr",
+ 2248: "ums",
+ 2249: "rfmp",
+ 2250: "remote-collab",
+ 2251: "dif-port",
+ 2252: "njenet-ssl",
+ 2253: "dtv-chan-req",
+ 2254: "seispoc",
+ 2255: "vrtp",
+ 2256: "pcc-mfp",
+ 2257: "simple-tx-rx",
+ 2258: "rcts",
+ 2260: "apc-2260",
+ 2261: "comotionmaster",
+ 2262: "comotionback",
+ 2263: "ecwcfg",
+ 2264: "apx500api-1",
+ 2265: "apx500api-2",
+ 2266: "mfserver",
+ 2267: "ontobroker",
+ 2268: "amt",
+ 2269: "mikey",
+ 2270: "starschool",
+ 2271: "mmcals",
+ 2272: "mmcal",
+ 2273: "mysql-im",
+ 2274: "pcttunnell",
+ 2275: "ibridge-data",
+ 2276: "ibridge-mgmt",
+ 2277: "bluectrlproxy",
+ 2278: "s3db",
+ 2279: "xmquery",
+ 2280: "lnvpoller",
+ 2281: "lnvconsole",
+ 2282: "lnvalarm",
+ 2283: "lnvstatus",
+ 2284: "lnvmaps",
+ 2285: "lnvmailmon",
+ 2286: "nas-metering",
+ 2287: "dna",
+ 2288: "netml",
+ 2289: "dict-lookup",
+ 2290: "sonus-logging",
+ 2291: "eapsp",
+ 2292: "mib-streaming",
+ 2293: "npdbgmngr",
+ 2294: "konshus-lm",
+ 2295: "advant-lm",
+ 2296: "theta-lm",
+ 2297: "d2k-datamover1",
+ 2298: "d2k-datamover2",
+ 2299: "pc-telecommute",
+ 2300: "cvmmon",
+ 2301: "cpq-wbem",
+ 2302: "binderysupport",
+ 2303: "proxy-gateway",
+ 2304: "attachmate-uts",
+ 2305: "mt-scaleserver",
+ 2306: "tappi-boxnet",
+ 2307: "pehelp",
+ 2308: "sdhelp",
+ 2309: "sdserver",
+ 2310: "sdclient",
+ 2311: "messageservice",
+ 2312: "wanscaler",
+ 2313: "iapp",
+ 2314: "cr-websystems",
+ 2315: "precise-sft",
+ 2316: "sent-lm",
+ 2317: "attachmate-g32",
+ 2318: "cadencecontrol",
+ 2319: "infolibria",
+ 2320: "siebel-ns",
+ 2321: "rdlap",
+ 2322: "ofsd",
+ 2323: "3d-nfsd",
+ 2324: "cosmocall",
+ 2325: "ansysli",
+ 2326: "idcp",
+ 2327: "xingcsm",
+ 2328: "netrix-sftm",
+ 2329: "nvd",
+ 2330: "tscchat",
+ 2331: "agentview",
+ 2332: "rcc-host",
+ 2333: "snapp",
+ 2334: "ace-client",
+ 2335: "ace-proxy",
+ 2336: "appleugcontrol",
+ 2337: "ideesrv",
+ 2338: "norton-lambert",
+ 2339: "3com-webview",
+ 2340: "wrs-registry",
+ 2341: "xiostatus",
+ 2342: "manage-exec",
+ 2343: "nati-logos",
+ 2344: "fcmsys",
+ 2345: "dbm",
+ 2346: "redstorm-join",
+ 2347: "redstorm-find",
+ 2348: "redstorm-info",
+ 2349: "redstorm-diag",
+ 2350: "psbserver",
+ 2351: "psrserver",
+ 2352: "pslserver",
+ 2353: "pspserver",
+ 2354: "psprserver",
+ 2355: "psdbserver",
+ 2356: "gxtelmd",
+ 2357: "unihub-server",
+ 2358: "futrix",
+ 2359: "flukeserver",
+ 2360: "nexstorindltd",
+ 2361: "tl1",
+ 2362: "digiman",
+ 2363: "mediacntrlnfsd",
+ 2364: "oi-2000",
+ 2365: "dbref",
+ 2366: "qip-login",
+ 2367: "service-ctrl",
+ 2368: "opentable",
+ 2370: "l3-hbmon",
+ 2372: "lanmessenger",
+ 2381: "compaq-https",
+ 2382: "ms-olap3",
+ 2383: "ms-olap4",
+ 2384: "sd-capacity",
+ 2385: "sd-data",
+ 2386: "virtualtape",
+ 2387: "vsamredirector",
+ 2388: "mynahautostart",
+ 2389: "ovsessionmgr",
+ 2390: "rsmtp",
+ 2391: "3com-net-mgmt",
+ 2392: "tacticalauth",
+ 2393: "ms-olap1",
+ 2394: "ms-olap2",
+ 2395: "lan900-remote",
+ 2396: "wusage",
+ 2397: "ncl",
+ 2398: "orbiter",
+ 2399: "fmpro-fdal",
+ 2400: "opequus-server",
+ 2401: "cvspserver",
+ 2402: "taskmaster2000",
+ 2403: "taskmaster2000",
+ 2404: "iec-104",
+ 2405: "trc-netpoll",
+ 2406: "jediserver",
+ 2407: "orion",
+ 2409: "sns-protocol",
+ 2410: "vrts-registry",
+ 2411: "netwave-ap-mgmt",
+ 2412: "cdn",
+ 2413: "orion-rmi-reg",
+ 2414: "beeyond",
+ 2415: "codima-rtp",
+ 2416: "rmtserver",
+ 2417: "composit-server",
+ 2418: "cas",
+ 2419: "attachmate-s2s",
+ 2420: "dslremote-mgmt",
+ 2421: "g-talk",
+ 2422: "crmsbits",
+ 2423: "rnrp",
+ 2424: "kofax-svr",
+ 2425: "fjitsuappmgr",
+ 2426: "vcmp",
+ 2427: "mgcp-gateway",
+ 2428: "ott",
+ 2429: "ft-role",
+ 2430: "venus",
+ 2431: "venus-se",
+ 2432: "codasrv",
+ 2433: "codasrv-se",
+ 2434: "pxc-epmap",
+ 2435: "optilogic",
+ 2436: "topx",
+ 2437: "unicontrol",
+ 2438: "msp",
+ 2439: "sybasedbsynch",
+ 2440: "spearway",
+ 2441: "pvsw-inet",
+ 2442: "netangel",
+ 2443: "powerclientcsf",
+ 2444: "btpp2sectrans",
+ 2445: "dtn1",
+ 2446: "bues-service",
+ 2447: "ovwdb",
+ 2448: "hpppssvr",
+ 2449: "ratl",
+ 2450: "netadmin",
+ 2451: "netchat",
+ 2452: "snifferclient",
+ 2453: "madge-ltd",
+ 2454: "indx-dds",
+ 2455: "wago-io-system",
+ 2456: "altav-remmgt",
+ 2457: "rapido-ip",
+ 2458: "griffin",
+ 2459: "community",
+ 2460: "ms-theater",
+ 2461: "qadmifoper",
+ 2462: "qadmifevent",
+ 2463: "lsi-raid-mgmt",
+ 2464: "direcpc-si",
+ 2465: "lbm",
+ 2466: "lbf",
+ 2467: "high-criteria",
+ 2468: "qip-msgd",
+ 2469: "mti-tcs-comm",
+ 2470: "taskman-port",
+ 2471: "seaodbc",
+ 2472: "c3",
+ 2473: "aker-cdp",
+ 2474: "vitalanalysis",
+ 2475: "ace-server",
+ 2476: "ace-svr-prop",
+ 2477: "ssm-cvs",
+ 2478: "ssm-cssps",
+ 2479: "ssm-els",
+ 2480: "powerexchange",
+ 2481: "giop",
+ 2482: "giop-ssl",
+ 2483: "ttc",
+ 2484: "ttc-ssl",
+ 2485: "netobjects1",
+ 2486: "netobjects2",
+ 2487: "pns",
+ 2488: "moy-corp",
+ 2489: "tsilb",
+ 2490: "qip-qdhcp",
+ 2491: "conclave-cpp",
+ 2492: "groove",
+ 2493: "talarian-mqs",
+ 2494: "bmc-ar",
+ 2495: "fast-rem-serv",
+ 2496: "dirgis",
+ 2497: "quaddb",
+ 2498: "odn-castraq",
+ 2499: "unicontrol",
+ 2500: "rtsserv",
+ 2501: "rtsclient",
+ 2502: "kentrox-prot",
+ 2503: "nms-dpnss",
+ 2504: "wlbs",
+ 2505: "ppcontrol",
+ 2506: "jbroker",
+ 2507: "spock",
+ 2508: "jdatastore",
+ 2509: "fjmpss",
+ 2510: "fjappmgrbulk",
+ 2511: "metastorm",
+ 2512: "citrixima",
+ 2513: "citrixadmin",
+ 2514: "facsys-ntp",
+ 2515: "facsys-router",
+ 2516: "maincontrol",
+ 2517: "call-sig-trans",
+ 2518: "willy",
+ 2519: "globmsgsvc",
+ 2520: "pvsw",
+ 2521: "adaptecmgr",
+ 2522: "windb",
+ 2523: "qke-llc-v3",
+ 2524: "optiwave-lm",
+ 2525: "ms-v-worlds",
+ 2526: "ema-sent-lm",
+ 2527: "iqserver",
+ 2528: "ncr-ccl",
+ 2529: "utsftp",
+ 2530: "vrcommerce",
+ 2531: "ito-e-gui",
+ 2532: "ovtopmd",
+ 2533: "snifferserver",
+ 2534: "combox-web-acc",
+ 2535: "madcap",
+ 2536: "btpp2audctr1",
+ 2537: "upgrade",
+ 2538: "vnwk-prapi",
+ 2539: "vsiadmin",
+ 2540: "lonworks",
+ 2541: "lonworks2",
+ 2542: "udrawgraph",
+ 2543: "reftek",
+ 2544: "novell-zen",
+ 2545: "sis-emt",
+ 2546: "vytalvaultbrtp",
+ 2547: "vytalvaultvsmp",
+ 2548: "vytalvaultpipe",
+ 2549: "ipass",
+ 2550: "ads",
+ 2551: "isg-uda-server",
+ 2552: "call-logging",
+ 2553: "efidiningport",
+ 2554: "vcnet-link-v10",
+ 2555: "compaq-wcp",
+ 2556: "nicetec-nmsvc",
+ 2557: "nicetec-mgmt",
+ 2558: "pclemultimedia",
+ 2559: "lstp",
+ 2560: "labrat",
+ 2561: "mosaixcc",
+ 2562: "delibo",
+ 2563: "cti-redwood",
+ 2564: "hp-3000-telnet",
+ 2565: "coord-svr",
+ 2566: "pcs-pcw",
+ 2567: "clp",
+ 2568: "spamtrap",
+ 2569: "sonuscallsig",
+ 2570: "hs-port",
+ 2571: "cecsvc",
+ 2572: "ibp",
+ 2573: "trustestablish",
+ 2574: "blockade-bpsp",
+ 2575: "hl7",
+ 2576: "tclprodebugger",
+ 2577: "scipticslsrvr",
+ 2578: "rvs-isdn-dcp",
+ 2579: "mpfoncl",
+ 2580: "tributary",
+ 2581: "argis-te",
+ 2582: "argis-ds",
+ 2583: "mon",
+ 2584: "cyaserv",
+ 2585: "netx-server",
+ 2586: "netx-agent",
+ 2587: "masc",
+ 2588: "privilege",
+ 2589: "quartus-tcl",
+ 2590: "idotdist",
+ 2591: "maytagshuffle",
+ 2592: "netrek",
+ 2593: "mns-mail",
+ 2594: "dts",
+ 2595: "worldfusion1",
+ 2596: "worldfusion2",
+ 2597: "homesteadglory",
+ 2598: "citriximaclient",
+ 2599: "snapd",
+ 2600: "hpstgmgr",
+ 2601: "discp-client",
+ 2602: "discp-server",
+ 2603: "servicemeter",
+ 2604: "nsc-ccs",
+ 2605: "nsc-posa",
+ 2606: "netmon",
+ 2607: "connection",
+ 2608: "wag-service",
+ 2609: "system-monitor",
+ 2610: "versa-tek",
+ 2611: "lionhead",
+ 2612: "qpasa-agent",
+ 2613: "smntubootstrap",
+ 2614: "neveroffline",
+ 2615: "firepower",
+ 2616: "appswitch-emp",
+ 2617: "cmadmin",
+ 2618: "priority-e-com",
+ 2619: "bruce",
+ 2620: "lpsrecommender",
+ 2621: "miles-apart",
+ 2622: "metricadbc",
+ 2623: "lmdp",
+ 2624: "aria",
+ 2625: "blwnkl-port",
+ 2626: "gbjd816",
+ 2627: "moshebeeri",
+ 2628: "dict",
+ 2629: "sitaraserver",
+ 2630: "sitaramgmt",
+ 2631: "sitaradir",
+ 2632: "irdg-post",
+ 2633: "interintelli",
+ 2634: "pk-electronics",
+ 2635: "backburner",
+ 2636: "solve",
+ 2637: "imdocsvc",
+ 2638: "sybaseanywhere",
+ 2639: "aminet",
+ 2640: "ami-control",
+ 2641: "hdl-srv",
+ 2642: "tragic",
+ 2643: "gte-samp",
+ 2644: "travsoft-ipx-t",
+ 2645: "novell-ipx-cmd",
+ 2646: "and-lm",
+ 2647: "syncserver",
+ 2648: "upsnotifyprot",
+ 2649: "vpsipport",
+ 2650: "eristwoguns",
+ 2651: "ebinsite",
+ 2652: "interpathpanel",
+ 2653: "sonus",
+ 2654: "corel-vncadmin",
+ 2655: "unglue",
+ 2656: "kana",
+ 2657: "sns-dispatcher",
+ 2658: "sns-admin",
+ 2659: "sns-query",
+ 2660: "gcmonitor",
+ 2661: "olhost",
+ 2662: "bintec-capi",
+ 2663: "bintec-tapi",
+ 2664: "patrol-mq-gm",
+ 2665: "patrol-mq-nm",
+ 2666: "extensis",
+ 2667: "alarm-clock-s",
+ 2668: "alarm-clock-c",
+ 2669: "toad",
+ 2670: "tve-announce",
+ 2671: "newlixreg",
+ 2672: "nhserver",
+ 2673: "firstcall42",
+ 2674: "ewnn",
+ 2675: "ttc-etap",
+ 2676: "simslink",
+ 2677: "gadgetgate1way",
+ 2678: "gadgetgate2way",
+ 2679: "syncserverssl",
+ 2680: "pxc-sapxom",
+ 2681: "mpnjsomb",
+ 2683: "ncdloadbalance",
+ 2684: "mpnjsosv",
+ 2685: "mpnjsocl",
+ 2686: "mpnjsomg",
+ 2687: "pq-lic-mgmt",
+ 2688: "md-cg-http",
+ 2689: "fastlynx",
+ 2690: "hp-nnm-data",
+ 2691: "itinternet",
+ 2692: "admins-lms",
+ 2694: "pwrsevent",
+ 2695: "vspread",
+ 2696: "unifyadmin",
+ 2697: "oce-snmp-trap",
+ 2698: "mck-ivpip",
+ 2699: "csoft-plusclnt",
+ 2700: "tqdata",
+ 2701: "sms-rcinfo",
+ 2702: "sms-xfer",
+ 2703: "sms-chat",
+ 2704: "sms-remctrl",
+ 2705: "sds-admin",
+ 2706: "ncdmirroring",
+ 2707: "emcsymapiport",
+ 2708: "banyan-net",
+ 2709: "supermon",
+ 2710: "sso-service",
+ 2711: "sso-control",
+ 2712: "aocp",
+ 2713: "raventbs",
+ 2714: "raventdm",
+ 2715: "hpstgmgr2",
+ 2716: "inova-ip-disco",
+ 2717: "pn-requester",
+ 2718: "pn-requester2",
+ 2719: "scan-change",
+ 2720: "wkars",
+ 2721: "smart-diagnose",
+ 2722: "proactivesrvr",
+ 2723: "watchdog-nt",
+ 2724: "qotps",
+ 2725: "msolap-ptp2",
+ 2726: "tams",
+ 2727: "mgcp-callagent",
+ 2728: "sqdr",
+ 2729: "tcim-control",
+ 2730: "nec-raidplus",
+ 2731: "fyre-messanger",
+ 2732: "g5m",
+ 2733: "signet-ctf",
+ 2734: "ccs-software",
+ 2735: "netiq-mc",
+ 2736: "radwiz-nms-srv",
+ 2737: "srp-feedback",
+ 2738: "ndl-tcp-ois-gw",
+ 2739: "tn-timing",
+ 2740: "alarm",
+ 2741: "tsb",
+ 2742: "tsb2",
+ 2743: "murx",
+ 2744: "honyaku",
+ 2745: "urbisnet",
+ 2746: "cpudpencap",
+ 2747: "fjippol-swrly",
+ 2748: "fjippol-polsvr",
+ 2749: "fjippol-cnsl",
+ 2750: "fjippol-port1",
+ 2751: "fjippol-port2",
+ 2752: "rsisysaccess",
+ 2753: "de-spot",
+ 2754: "apollo-cc",
+ 2755: "expresspay",
+ 2756: "simplement-tie",
+ 2757: "cnrp",
+ 2758: "apollo-status",
+ 2759: "apollo-gms",
+ 2760: "sabams",
+ 2761: "dicom-iscl",
+ 2762: "dicom-tls",
+ 2763: "desktop-dna",
+ 2764: "data-insurance",
+ 2765: "qip-audup",
+ 2766: "compaq-scp",
+ 2767: "uadtc",
+ 2768: "uacs",
+ 2769: "exce",
+ 2770: "veronica",
+ 2771: "vergencecm",
+ 2772: "auris",
+ 2773: "rbakcup1",
+ 2774: "rbakcup2",
+ 2775: "smpp",
+ 2776: "ridgeway1",
+ 2777: "ridgeway2",
+ 2778: "gwen-sonya",
+ 2779: "lbc-sync",
+ 2780: "lbc-control",
+ 2781: "whosells",
+ 2782: "everydayrc",
+ 2783: "aises",
+ 2784: "www-dev",
+ 2785: "aic-np",
+ 2786: "aic-oncrpc",
+ 2787: "piccolo",
+ 2788: "fryeserv",
+ 2789: "media-agent",
+ 2790: "plgproxy",
+ 2791: "mtport-regist",
+ 2792: "f5-globalsite",
+ 2793: "initlsmsad",
+ 2795: "livestats",
+ 2796: "ac-tech",
+ 2797: "esp-encap",
+ 2798: "tmesis-upshot",
+ 2799: "icon-discover",
+ 2800: "acc-raid",
+ 2801: "igcp",
+ 2802: "veritas-udp1",
+ 2803: "btprjctrl",
+ 2804: "dvr-esm",
+ 2805: "wta-wsp-s",
+ 2806: "cspuni",
+ 2807: "cspmulti",
+ 2808: "j-lan-p",
+ 2809: "corbaloc",
+ 2810: "netsteward",
+ 2811: "gsiftp",
+ 2812: "atmtcp",
+ 2813: "llm-pass",
+ 2814: "llm-csv",
+ 2815: "lbc-measure",
+ 2816: "lbc-watchdog",
+ 2817: "nmsigport",
+ 2818: "rmlnk",
+ 2819: "fc-faultnotify",
+ 2820: "univision",
+ 2821: "vrts-at-port",
+ 2822: "ka0wuc",
+ 2823: "cqg-netlan",
+ 2824: "cqg-netlan-1",
+ 2826: "slc-systemlog",
+ 2827: "slc-ctrlrloops",
+ 2828: "itm-lm",
+ 2829: "silkp1",
+ 2830: "silkp2",
+ 2831: "silkp3",
+ 2832: "silkp4",
+ 2833: "glishd",
+ 2834: "evtp",
+ 2835: "evtp-data",
+ 2836: "catalyst",
+ 2837: "repliweb",
+ 2838: "starbot",
+ 2839: "nmsigport",
+ 2840: "l3-exprt",
+ 2841: "l3-ranger",
+ 2842: "l3-hawk",
+ 2843: "pdnet",
+ 2844: "bpcp-poll",
+ 2845: "bpcp-trap",
+ 2846: "aimpp-hello",
+ 2847: "aimpp-port-req",
+ 2848: "amt-blc-port",
+ 2849: "fxp",
+ 2850: "metaconsole",
+ 2851: "webemshttp",
+ 2852: "bears-01",
+ 2853: "ispipes",
+ 2854: "infomover",
+ 2856: "cesdinv",
+ 2857: "simctlp",
+ 2858: "ecnp",
+ 2859: "activememory",
+ 2860: "dialpad-voice1",
+ 2861: "dialpad-voice2",
+ 2862: "ttg-protocol",
+ 2863: "sonardata",
+ 2864: "astromed-main",
+ 2865: "pit-vpn",
+ 2866: "iwlistener",
+ 2867: "esps-portal",
+ 2868: "npep-messaging",
+ 2869: "icslap",
+ 2870: "daishi",
+ 2871: "msi-selectplay",
+ 2872: "radix",
+ 2874: "dxmessagebase1",
+ 2875: "dxmessagebase2",
+ 2876: "sps-tunnel",
+ 2877: "bluelance",
+ 2878: "aap",
+ 2879: "ucentric-ds",
+ 2880: "synapse",
+ 2881: "ndsp",
+ 2882: "ndtp",
+ 2883: "ndnp",
+ 2884: "flashmsg",
+ 2885: "topflow",
+ 2886: "responselogic",
+ 2887: "aironetddp",
+ 2888: "spcsdlobby",
+ 2889: "rsom",
+ 2890: "cspclmulti",
+ 2891: "cinegrfx-elmd",
+ 2892: "snifferdata",
+ 2893: "vseconnector",
+ 2894: "abacus-remote",
+ 2895: "natuslink",
+ 2896: "ecovisiong6-1",
+ 2897: "citrix-rtmp",
+ 2898: "appliance-cfg",
+ 2899: "powergemplus",
+ 2900: "quicksuite",
+ 2901: "allstorcns",
+ 2902: "netaspi",
+ 2903: "suitcase",
+ 2904: "m2ua",
+ 2906: "caller9",
+ 2907: "webmethods-b2b",
+ 2908: "mao",
+ 2909: "funk-dialout",
+ 2910: "tdaccess",
+ 2911: "blockade",
+ 2912: "epicon",
+ 2913: "boosterware",
+ 2914: "gamelobby",
+ 2915: "tksocket",
+ 2916: "elvin-server",
+ 2917: "elvin-client",
+ 2918: "kastenchasepad",
+ 2919: "roboer",
+ 2920: "roboeda",
+ 2921: "cesdcdman",
+ 2922: "cesdcdtrn",
+ 2923: "wta-wsp-wtp-s",
+ 2924: "precise-vip",
+ 2926: "mobile-file-dl",
+ 2927: "unimobilectrl",
+ 2928: "redstone-cpss",
+ 2929: "amx-webadmin",
+ 2930: "amx-weblinx",
+ 2931: "circle-x",
+ 2932: "incp",
+ 2933: "4-tieropmgw",
+ 2934: "4-tieropmcli",
+ 2935: "qtp",
+ 2936: "otpatch",
+ 2937: "pnaconsult-lm",
+ 2938: "sm-pas-1",
+ 2939: "sm-pas-2",
+ 2940: "sm-pas-3",
+ 2941: "sm-pas-4",
+ 2942: "sm-pas-5",
+ 2943: "ttnrepository",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 2946: "fjsvmpor",
+ 2947: "gpsd",
+ 2948: "wap-push",
+ 2949: "wap-pushsecure",
+ 2950: "esip",
+ 2951: "ottp",
+ 2952: "mpfwsas",
+ 2953: "ovalarmsrv",
+ 2954: "ovalarmsrv-cmd",
+ 2955: "csnotify",
+ 2956: "ovrimosdbman",
+ 2957: "jmact5",
+ 2958: "jmact6",
+ 2959: "rmopagt",
+ 2960: "dfoxserver",
+ 2961: "boldsoft-lm",
+ 2962: "iph-policy-cli",
+ 2963: "iph-policy-adm",
+ 2964: "bullant-srap",
+ 2965: "bullant-rap",
+ 2966: "idp-infotrieve",
+ 2967: "ssc-agent",
+ 2968: "enpp",
+ 2969: "essp",
+ 2970: "index-net",
+ 2971: "netclip",
+ 2972: "pmsm-webrctl",
+ 2973: "svnetworks",
+ 2974: "signal",
+ 2975: "fjmpcm",
+ 2976: "cns-srv-port",
+ 2977: "ttc-etap-ns",
+ 2978: "ttc-etap-ds",
+ 2979: "h263-video",
+ 2980: "wimd",
+ 2981: "mylxamport",
+ 2982: "iwb-whiteboard",
+ 2983: "netplan",
+ 2984: "hpidsadmin",
+ 2985: "hpidsagent",
+ 2986: "stonefalls",
+ 2987: "identify",
+ 2988: "hippad",
+ 2989: "zarkov",
+ 2990: "boscap",
+ 2991: "wkstn-mon",
+ 2992: "avenyo",
+ 2993: "veritas-vis1",
+ 2994: "veritas-vis2",
+ 2995: "idrs",
+ 2996: "vsixml",
+ 2997: "rebol",
+ 2998: "realsecure",
+ 2999: "remoteware-un",
+ 3000: "hbci",
+ 3002: "exlm-agent",
+ 3003: "cgms",
+ 3004: "csoftragent",
+ 3005: "geniuslm",
+ 3006: "ii-admin",
+ 3007: "lotusmtap",
+ 3008: "midnight-tech",
+ 3009: "pxc-ntfy",
+ 3010: "ping-pong",
+ 3011: "trusted-web",
+ 3012: "twsdss",
+ 3013: "gilatskysurfer",
+ 3014: "broker-service",
+ 3015: "nati-dstp",
+ 3016: "notify-srvr",
+ 3017: "event-listener",
+ 3018: "srvc-registry",
+ 3019: "resource-mgr",
+ 3020: "cifs",
+ 3021: "agriserver",
+ 3022: "csregagent",
+ 3023: "magicnotes",
+ 3024: "nds-sso",
+ 3025: "arepa-raft",
+ 3026: "agri-gateway",
+ 3027: "LiebDevMgmt-C",
+ 3028: "LiebDevMgmt-DM",
+ 3029: "LiebDevMgmt-A",
+ 3030: "arepa-cas",
+ 3031: "eppc",
+ 3032: "redwood-chat",
+ 3033: "pdb",
+ 3034: "osmosis-aeea",
+ 3035: "fjsv-gssagt",
+ 3036: "hagel-dump",
+ 3037: "hp-san-mgmt",
+ 3038: "santak-ups",
+ 3039: "cogitate",
+ 3040: "tomato-springs",
+ 3041: "di-traceware",
+ 3042: "journee",
+ 3043: "brp",
+ 3044: "epp",
+ 3045: "responsenet",
+ 3046: "di-ase",
+ 3047: "hlserver",
+ 3048: "pctrader",
+ 3049: "nsws",
+ 3050: "gds-db",
+ 3051: "galaxy-server",
+ 3052: "apc-3052",
+ 3053: "dsom-server",
+ 3054: "amt-cnf-prot",
+ 3055: "policyserver",
+ 3056: "cdl-server",
+ 3057: "goahead-fldup",
+ 3058: "videobeans",
+ 3059: "qsoft",
+ 3060: "interserver",
+ 3061: "cautcpd",
+ 3062: "ncacn-ip-tcp",
+ 3063: "ncadg-ip-udp",
+ 3064: "rprt",
+ 3065: "slinterbase",
+ 3066: "netattachsdmp",
+ 3067: "fjhpjp",
+ 3068: "ls3bcast",
+ 3069: "ls3",
+ 3070: "mgxswitch",
+ 3072: "csd-monitor",
+ 3073: "vcrp",
+ 3074: "xbox",
+ 3075: "orbix-locator",
+ 3076: "orbix-config",
+ 3077: "orbix-loc-ssl",
+ 3078: "orbix-cfg-ssl",
+ 3079: "lv-frontpanel",
+ 3080: "stm-pproc",
+ 3081: "tl1-lv",
+ 3082: "tl1-raw",
+ 3083: "tl1-telnet",
+ 3084: "itm-mccs",
+ 3085: "pcihreq",
+ 3086: "jdl-dbkitchen",
+ 3087: "asoki-sma",
+ 3088: "xdtp",
+ 3089: "ptk-alink",
+ 3090: "stss",
+ 3091: "1ci-smcs",
+ 3093: "rapidmq-center",
+ 3094: "rapidmq-reg",
+ 3095: "panasas",
+ 3096: "ndl-aps",
+ 3098: "umm-port",
+ 3099: "chmd",
+ 3100: "opcon-xps",
+ 3101: "hp-pxpib",
+ 3102: "slslavemon",
+ 3103: "autocuesmi",
+ 3104: "autocuetime",
+ 3105: "cardbox",
+ 3106: "cardbox-http",
+ 3107: "business",
+ 3108: "geolocate",
+ 3109: "personnel",
+ 3110: "sim-control",
+ 3111: "wsynch",
+ 3112: "ksysguard",
+ 3113: "cs-auth-svr",
+ 3114: "ccmad",
+ 3115: "mctet-master",
+ 3116: "mctet-gateway",
+ 3117: "mctet-jserv",
+ 3118: "pkagent",
+ 3119: "d2000kernel",
+ 3120: "d2000webserver",
+ 3122: "vtr-emulator",
+ 3123: "edix",
+ 3124: "beacon-port",
+ 3125: "a13-an",
+ 3127: "ctx-bridge",
+ 3128: "ndl-aas",
+ 3129: "netport-id",
+ 3130: "icpv2",
+ 3131: "netbookmark",
+ 3132: "ms-rule-engine",
+ 3133: "prism-deploy",
+ 3134: "ecp",
+ 3135: "peerbook-port",
+ 3136: "grubd",
+ 3137: "rtnt-1",
+ 3138: "rtnt-2",
+ 3139: "incognitorv",
+ 3140: "ariliamulti",
+ 3141: "vmodem",
+ 3142: "rdc-wh-eos",
+ 3143: "seaview",
+ 3144: "tarantella",
+ 3145: "csi-lfap",
+ 3146: "bears-02",
+ 3147: "rfio",
+ 3148: "nm-game-admin",
+ 3149: "nm-game-server",
+ 3150: "nm-asses-admin",
+ 3151: "nm-assessor",
+ 3152: "feitianrockey",
+ 3153: "s8-client-port",
+ 3154: "ccmrmi",
+ 3155: "jpegmpeg",
+ 3156: "indura",
+ 3157: "e3consultants",
+ 3158: "stvp",
+ 3159: "navegaweb-port",
+ 3160: "tip-app-server",
+ 3161: "doc1lm",
+ 3162: "sflm",
+ 3163: "res-sap",
+ 3164: "imprs",
+ 3165: "newgenpay",
+ 3166: "sossecollector",
+ 3167: "nowcontact",
+ 3168: "poweronnud",
+ 3169: "serverview-as",
+ 3170: "serverview-asn",
+ 3171: "serverview-gf",
+ 3172: "serverview-rm",
+ 3173: "serverview-icc",
+ 3174: "armi-server",
+ 3175: "t1-e1-over-ip",
+ 3176: "ars-master",
+ 3177: "phonex-port",
+ 3178: "radclientport",
+ 3179: "h2gf-w-2m",
+ 3180: "mc-brk-srv",
+ 3181: "bmcpatrolagent",
+ 3182: "bmcpatrolrnvu",
+ 3183: "cops-tls",
+ 3184: "apogeex-port",
+ 3185: "smpppd",
+ 3186: "iiw-port",
+ 3187: "odi-port",
+ 3188: "brcm-comm-port",
+ 3189: "pcle-infex",
+ 3190: "csvr-proxy",
+ 3191: "csvr-sslproxy",
+ 3192: "firemonrcc",
+ 3193: "spandataport",
+ 3194: "magbind",
+ 3195: "ncu-1",
+ 3196: "ncu-2",
+ 3197: "embrace-dp-s",
+ 3198: "embrace-dp-c",
+ 3199: "dmod-workspace",
+ 3200: "tick-port",
+ 3201: "cpq-tasksmart",
+ 3202: "intraintra",
+ 3203: "netwatcher-mon",
+ 3204: "netwatcher-db",
+ 3205: "isns",
+ 3206: "ironmail",
+ 3207: "vx-auth-port",
+ 3208: "pfu-prcallback",
+ 3209: "netwkpathengine",
+ 3210: "flamenco-proxy",
+ 3211: "avsecuremgmt",
+ 3212: "surveyinst",
+ 3213: "neon24x7",
+ 3214: "jmq-daemon-1",
+ 3215: "jmq-daemon-2",
+ 3216: "ferrari-foam",
+ 3217: "unite",
+ 3218: "smartpackets",
+ 3219: "wms-messenger",
+ 3220: "xnm-ssl",
+ 3221: "xnm-clear-text",
+ 3222: "glbp",
+ 3223: "digivote",
+ 3224: "aes-discovery",
+ 3225: "fcip-port",
+ 3226: "isi-irp",
+ 3227: "dwnmshttp",
+ 3228: "dwmsgserver",
+ 3229: "global-cd-port",
+ 3230: "sftdst-port",
+ 3231: "vidigo",
+ 3232: "mdtp",
+ 3233: "whisker",
+ 3234: "alchemy",
+ 3235: "mdap-port",
+ 3236: "apparenet-ts",
+ 3237: "apparenet-tps",
+ 3238: "apparenet-as",
+ 3239: "apparenet-ui",
+ 3240: "triomotion",
+ 3241: "sysorb",
+ 3242: "sdp-id-port",
+ 3243: "timelot",
+ 3244: "onesaf",
+ 3245: "vieo-fe",
+ 3246: "dvt-system",
+ 3247: "dvt-data",
+ 3248: "procos-lm",
+ 3249: "ssp",
+ 3250: "hicp",
+ 3251: "sysscanner",
+ 3252: "dhe",
+ 3253: "pda-data",
+ 3254: "pda-sys",
+ 3255: "semaphore",
+ 3256: "cpqrpm-agent",
+ 3257: "cpqrpm-server",
+ 3258: "ivecon-port",
+ 3259: "epncdp2",
+ 3260: "iscsi-target",
+ 3261: "winshadow",
+ 3262: "necp",
+ 3263: "ecolor-imager",
+ 3264: "ccmail",
+ 3265: "altav-tunnel",
+ 3266: "ns-cfg-server",
+ 3267: "ibm-dial-out",
+ 3268: "msft-gc",
+ 3269: "msft-gc-ssl",
+ 3270: "verismart",
+ 3271: "csoft-prev",
+ 3272: "user-manager",
+ 3273: "sxmp",
+ 3274: "ordinox-server",
+ 3275: "samd",
+ 3276: "maxim-asics",
+ 3277: "awg-proxy",
+ 3278: "lkcmserver",
+ 3279: "admind",
+ 3280: "vs-server",
+ 3281: "sysopt",
+ 3282: "datusorb",
+ 3283: "Apple Remote Desktop (Net Assistant)",
+ 3284: "4talk",
+ 3285: "plato",
+ 3286: "e-net",
+ 3287: "directvdata",
+ 3288: "cops",
+ 3289: "enpc",
+ 3290: "caps-lm",
+ 3291: "sah-lm",
+ 3292: "cart-o-rama",
+ 3293: "fg-fps",
+ 3294: "fg-gip",
+ 3295: "dyniplookup",
+ 3296: "rib-slm",
+ 3297: "cytel-lm",
+ 3298: "deskview",
+ 3299: "pdrncs",
+ 3302: "mcs-fastmail",
+ 3303: "opsession-clnt",
+ 3304: "opsession-srvr",
+ 3305: "odette-ftp",
+ 3306: "mysql",
+ 3307: "opsession-prxy",
+ 3308: "tns-server",
+ 3309: "tns-adv",
+ 3310: "dyna-access",
+ 3311: "mcns-tel-ret",
+ 3312: "appman-server",
+ 3313: "uorb",
+ 3314: "uohost",
+ 3315: "cdid",
+ 3316: "aicc-cmi",
+ 3317: "vsaiport",
+ 3318: "ssrip",
+ 3319: "sdt-lmd",
+ 3320: "officelink2000",
+ 3321: "vnsstr",
+ 3326: "sftu",
+ 3327: "bbars",
+ 3328: "egptlm",
+ 3329: "hp-device-disc",
+ 3330: "mcs-calypsoicf",
+ 3331: "mcs-messaging",
+ 3332: "mcs-mailsvr",
+ 3333: "dec-notes",
+ 3334: "directv-web",
+ 3335: "directv-soft",
+ 3336: "directv-tick",
+ 3337: "directv-catlg",
+ 3338: "anet-b",
+ 3339: "anet-l",
+ 3340: "anet-m",
+ 3341: "anet-h",
+ 3342: "webtie",
+ 3343: "ms-cluster-net",
+ 3344: "bnt-manager",
+ 3345: "influence",
+ 3346: "trnsprntproxy",
+ 3347: "phoenix-rpc",
+ 3348: "pangolin-laser",
+ 3349: "chevinservices",
+ 3350: "findviatv",
+ 3351: "btrieve",
+ 3352: "ssql",
+ 3353: "fatpipe",
+ 3354: "suitjd",
+ 3355: "ordinox-dbase",
+ 3356: "upnotifyps",
+ 3357: "adtech-test",
+ 3358: "mpsysrmsvr",
+ 3359: "wg-netforce",
+ 3360: "kv-server",
+ 3361: "kv-agent",
+ 3362: "dj-ilm",
+ 3363: "nati-vi-server",
+ 3364: "creativeserver",
+ 3365: "contentserver",
+ 3366: "creativepartnr",
+ 3372: "tip2",
+ 3373: "lavenir-lm",
+ 3374: "cluster-disc",
+ 3375: "vsnm-agent",
+ 3376: "cdbroker",
+ 3377: "cogsys-lm",
+ 3378: "wsicopy",
+ 3379: "socorfs",
+ 3380: "sns-channels",
+ 3381: "geneous",
+ 3382: "fujitsu-neat",
+ 3383: "esp-lm",
+ 3384: "hp-clic",
+ 3385: "qnxnetman",
+ 3386: "gprs-sig",
+ 3387: "backroomnet",
+ 3388: "cbserver",
+ 3389: "ms-wbt-server",
+ 3390: "dsc",
+ 3391: "savant",
+ 3392: "efi-lm",
+ 3393: "d2k-tapestry1",
+ 3394: "d2k-tapestry2",
+ 3395: "dyna-lm",
+ 3396: "printer-agent",
+ 3397: "cloanto-lm",
+ 3398: "mercantile",
+ 3399: "csms",
+ 3400: "csms2",
+ 3401: "filecast",
+ 3402: "fxaengine-net",
+ 3405: "nokia-ann-ch1",
+ 3406: "nokia-ann-ch2",
+ 3407: "ldap-admin",
+ 3408: "BESApi",
+ 3409: "networklens",
+ 3410: "networklenss",
+ 3411: "biolink-auth",
+ 3412: "xmlblaster",
+ 3413: "svnet",
+ 3414: "wip-port",
+ 3415: "bcinameservice",
+ 3416: "commandport",
+ 3417: "csvr",
+ 3418: "rnmap",
+ 3419: "softaudit",
+ 3420: "ifcp-port",
+ 3421: "bmap",
+ 3422: "rusb-sys-port",
+ 3423: "xtrm",
+ 3424: "xtrms",
+ 3425: "agps-port",
+ 3426: "arkivio",
+ 3427: "websphere-snmp",
+ 3428: "twcss",
+ 3429: "gcsp",
+ 3430: "ssdispatch",
+ 3431: "ndl-als",
+ 3432: "osdcp",
+ 3433: "opnet-smp",
+ 3434: "opencm",
+ 3435: "pacom",
+ 3436: "gc-config",
+ 3437: "autocueds",
+ 3438: "spiral-admin",
+ 3439: "hri-port",
+ 3440: "ans-console",
+ 3441: "connect-client",
+ 3442: "connect-server",
+ 3443: "ov-nnm-websrv",
+ 3444: "denali-server",
+ 3445: "monp",
+ 3446: "3comfaxrpc",
+ 3447: "directnet",
+ 3448: "dnc-port",
+ 3449: "hotu-chat",
+ 3450: "castorproxy",
+ 3451: "asam",
+ 3452: "sabp-signal",
+ 3453: "pscupd",
+ 3454: "mira",
+ 3455: "prsvp",
+ 3456: "vat",
+ 3457: "vat-control",
+ 3458: "d3winosfi",
+ 3459: "integral",
+ 3460: "edm-manager",
+ 3461: "edm-stager",
+ 3462: "edm-std-notify",
+ 3463: "edm-adm-notify",
+ 3464: "edm-mgr-sync",
+ 3465: "edm-mgr-cntrl",
+ 3466: "workflow",
+ 3467: "rcst",
+ 3468: "ttcmremotectrl",
+ 3469: "pluribus",
+ 3470: "jt400",
+ 3471: "jt400-ssl",
+ 3472: "jaugsremotec-1",
+ 3473: "jaugsremotec-2",
+ 3474: "ttntspauto",
+ 3475: "genisar-port",
+ 3476: "nppmp",
+ 3477: "ecomm",
+ 3478: "stun",
+ 3479: "twrpc",
+ 3480: "plethora",
+ 3481: "cleanerliverc",
+ 3482: "vulture",
+ 3483: "slim-devices",
+ 3484: "gbs-stp",
+ 3485: "celatalk",
+ 3486: "ifsf-hb-port",
+ 3487: "ltcudp",
+ 3488: "fs-rh-srv",
+ 3489: "dtp-dia",
+ 3490: "colubris",
+ 3491: "swr-port",
+ 3492: "tvdumtray-port",
+ 3493: "nut",
+ 3494: "ibm3494",
+ 3495: "seclayer-tcp",
+ 3496: "seclayer-tls",
+ 3497: "ipether232port",
+ 3498: "dashpas-port",
+ 3499: "sccip-media",
+ 3500: "rtmp-port",
+ 3501: "isoft-p2p",
+ 3502: "avinstalldisc",
+ 3503: "lsp-ping",
+ 3504: "ironstorm",
+ 3505: "ccmcomm",
+ 3506: "apc-3506",
+ 3507: "nesh-broker",
+ 3508: "interactionweb",
+ 3509: "vt-ssl",
+ 3510: "xss-port",
+ 3511: "webmail-2",
+ 3512: "aztec",
+ 3513: "arcpd",
+ 3514: "must-p2p",
+ 3515: "must-backplane",
+ 3516: "smartcard-port",
+ 3517: "802-11-iapp",
+ 3518: "artifact-msg",
+ 3519: "galileo",
+ 3520: "galileolog",
+ 3521: "mc3ss",
+ 3522: "nssocketport",
+ 3523: "odeumservlink",
+ 3524: "ecmport",
+ 3525: "eisport",
+ 3526: "starquiz-port",
+ 3527: "beserver-msg-q",
+ 3528: "jboss-iiop",
+ 3529: "jboss-iiop-ssl",
+ 3530: "gf",
+ 3531: "joltid",
+ 3532: "raven-rmp",
+ 3533: "raven-rdp",
+ 3534: "urld-port",
+ 3535: "ms-la",
+ 3536: "snac",
+ 3537: "ni-visa-remote",
+ 3538: "ibm-diradm",
+ 3539: "ibm-diradm-ssl",
+ 3540: "pnrp-port",
+ 3541: "voispeed-port",
+ 3542: "hacl-monitor",
+ 3543: "qftest-lookup",
+ 3544: "teredo",
+ 3545: "camac",
+ 3547: "symantec-sim",
+ 3548: "interworld",
+ 3549: "tellumat-nms",
+ 3550: "ssmpp",
+ 3551: "apcupsd",
+ 3552: "taserver",
+ 3553: "rbr-discovery",
+ 3554: "questnotify",
+ 3555: "razor",
+ 3556: "sky-transport",
+ 3557: "personalos-001",
+ 3558: "mcp-port",
+ 3559: "cctv-port",
+ 3560: "iniserve-port",
+ 3561: "bmc-onekey",
+ 3562: "sdbproxy",
+ 3563: "watcomdebug",
+ 3564: "esimport",
+ 3567: "dof-eps",
+ 3568: "dof-tunnel-sec",
+ 3569: "mbg-ctrl",
+ 3570: "mccwebsvr-port",
+ 3571: "megardsvr-port",
+ 3572: "megaregsvrport",
+ 3573: "tag-ups-1",
+ 3574: "dmaf-caster",
+ 3575: "ccm-port",
+ 3576: "cmc-port",
+ 3577: "config-port",
+ 3578: "data-port",
+ 3579: "ttat3lb",
+ 3580: "nati-svrloc",
+ 3581: "kfxaclicensing",
+ 3582: "press",
+ 3583: "canex-watch",
+ 3584: "u-dbap",
+ 3585: "emprise-lls",
+ 3586: "emprise-lsc",
+ 3587: "p2pgroup",
+ 3588: "sentinel",
+ 3589: "isomair",
+ 3590: "wv-csp-sms",
+ 3591: "gtrack-server",
+ 3592: "gtrack-ne",
+ 3593: "bpmd",
+ 3594: "mediaspace",
+ 3595: "shareapp",
+ 3596: "iw-mmogame",
+ 3597: "a14",
+ 3598: "a15",
+ 3599: "quasar-server",
+ 3600: "trap-daemon",
+ 3601: "visinet-gui",
+ 3602: "infiniswitchcl",
+ 3603: "int-rcv-cntrl",
+ 3604: "bmc-jmx-port",
+ 3605: "comcam-io",
+ 3606: "splitlock",
+ 3607: "precise-i3",
+ 3608: "trendchip-dcp",
+ 3609: "cpdi-pidas-cm",
+ 3610: "echonet",
+ 3611: "six-degrees",
+ 3612: "hp-dataprotect",
+ 3613: "alaris-disc",
+ 3614: "sigma-port",
+ 3615: "start-network",
+ 3616: "cd3o-protocol",
+ 3617: "sharp-server",
+ 3618: "aairnet-1",
+ 3619: "aairnet-2",
+ 3620: "ep-pcp",
+ 3621: "ep-nsp",
+ 3622: "ff-lr-port",
+ 3623: "haipe-discover",
+ 3624: "dist-upgrade",
+ 3625: "volley",
+ 3626: "bvcdaemon-port",
+ 3627: "jamserverport",
+ 3628: "ept-machine",
+ 3629: "escvpnet",
+ 3630: "cs-remote-db",
+ 3631: "cs-services",
+ 3632: "distcc",
+ 3633: "wacp",
+ 3634: "hlibmgr",
+ 3635: "sdo",
+ 3636: "servistaitsm",
+ 3637: "scservp",
+ 3638: "ehp-backup",
+ 3639: "xap-ha",
+ 3640: "netplay-port1",
+ 3641: "netplay-port2",
+ 3642: "juxml-port",
+ 3643: "audiojuggler",
+ 3644: "ssowatch",
+ 3645: "cyc",
+ 3646: "xss-srv-port",
+ 3647: "splitlock-gw",
+ 3648: "fjcp",
+ 3649: "nmmp",
+ 3650: "prismiq-plugin",
+ 3651: "xrpc-registry",
+ 3652: "vxcrnbuport",
+ 3653: "tsp",
+ 3654: "vaprtm",
+ 3655: "abatemgr",
+ 3656: "abatjss",
+ 3657: "immedianet-bcn",
+ 3658: "ps-ams",
+ 3659: "apple-sasl",
+ 3660: "can-nds-ssl",
+ 3661: "can-ferret-ssl",
+ 3662: "pserver",
+ 3663: "dtp",
+ 3664: "ups-engine",
+ 3665: "ent-engine",
+ 3666: "eserver-pap",
+ 3667: "infoexch",
+ 3668: "dell-rm-port",
+ 3669: "casanswmgmt",
+ 3670: "smile",
+ 3671: "efcp",
+ 3672: "lispworks-orb",
+ 3673: "mediavault-gui",
+ 3674: "wininstall-ipc",
+ 3675: "calltrax",
+ 3676: "va-pacbase",
+ 3677: "roverlog",
+ 3678: "ipr-dglt",
+ 3679: "Escale (Newton Dock)",
+ 3680: "npds-tracker",
+ 3681: "bts-x73",
+ 3682: "cas-mapi",
+ 3683: "bmc-ea",
+ 3684: "faxstfx-port",
+ 3685: "dsx-agent",
+ 3686: "tnmpv2",
+ 3687: "simple-push",
+ 3688: "simple-push-s",
+ 3689: "daap",
+ 3690: "svn",
+ 3691: "magaya-network",
+ 3692: "intelsync",
+ 3695: "bmc-data-coll",
+ 3696: "telnetcpcd",
+ 3697: "nw-license",
+ 3698: "sagectlpanel",
+ 3699: "kpn-icw",
+ 3700: "lrs-paging",
+ 3701: "netcelera",
+ 3702: "ws-discovery",
+ 3703: "adobeserver-3",
+ 3704: "adobeserver-4",
+ 3705: "adobeserver-5",
+ 3706: "rt-event",
+ 3707: "rt-event-s",
+ 3708: "sun-as-iiops",
+ 3709: "ca-idms",
+ 3710: "portgate-auth",
+ 3711: "edb-server2",
+ 3712: "sentinel-ent",
+ 3713: "tftps",
+ 3714: "delos-dms",
+ 3715: "anoto-rendezv",
+ 3716: "wv-csp-sms-cir",
+ 3717: "wv-csp-udp-cir",
+ 3718: "opus-services",
+ 3719: "itelserverport",
+ 3720: "ufastro-instr",
+ 3721: "xsync",
+ 3722: "xserveraid",
+ 3723: "sychrond",
+ 3724: "blizwow",
+ 3725: "na-er-tip",
+ 3726: "array-manager",
+ 3727: "e-mdu",
+ 3728: "e-woa",
+ 3729: "fksp-audit",
+ 3730: "client-ctrl",
+ 3731: "smap",
+ 3732: "m-wnn",
+ 3733: "multip-msg",
+ 3734: "synel-data",
+ 3735: "pwdis",
+ 3736: "rs-rmi",
+ 3738: "versatalk",
+ 3739: "launchbird-lm",
+ 3740: "heartbeat",
+ 3741: "wysdma",
+ 3742: "cst-port",
+ 3743: "ipcs-command",
+ 3744: "sasg",
+ 3745: "gw-call-port",
+ 3746: "linktest",
+ 3747: "linktest-s",
+ 3748: "webdata",
+ 3749: "cimtrak",
+ 3750: "cbos-ip-port",
+ 3751: "gprs-cube",
+ 3752: "vipremoteagent",
+ 3753: "nattyserver",
+ 3754: "timestenbroker",
+ 3755: "sas-remote-hlp",
+ 3756: "canon-capt",
+ 3757: "grf-port",
+ 3758: "apw-registry",
+ 3759: "exapt-lmgr",
+ 3760: "adtempusclient",
+ 3761: "gsakmp",
+ 3762: "gbs-smp",
+ 3763: "xo-wave",
+ 3764: "mni-prot-rout",
+ 3765: "rtraceroute",
+ 3767: "listmgr-port",
+ 3768: "rblcheckd",
+ 3769: "haipe-otnk",
+ 3770: "cindycollab",
+ 3771: "paging-port",
+ 3772: "ctp",
+ 3773: "ctdhercules",
+ 3774: "zicom",
+ 3775: "ispmmgr",
+ 3776: "dvcprov-port",
+ 3777: "jibe-eb",
+ 3778: "c-h-it-port",
+ 3779: "cognima",
+ 3780: "nnp",
+ 3781: "abcvoice-port",
+ 3782: "iso-tp0s",
+ 3783: "bim-pem",
+ 3784: "bfd-control",
+ 3785: "bfd-echo",
+ 3786: "upstriggervsw",
+ 3787: "fintrx",
+ 3788: "isrp-port",
+ 3789: "remotedeploy",
+ 3790: "quickbooksrds",
+ 3791: "tvnetworkvideo",
+ 3792: "sitewatch",
+ 3793: "dcsoftware",
+ 3794: "jaus",
+ 3795: "myblast",
+ 3796: "spw-dialer",
+ 3797: "idps",
+ 3798: "minilock",
+ 3799: "radius-dynauth",
+ 3800: "pwgpsi",
+ 3801: "ibm-mgr",
+ 3802: "vhd",
+ 3803: "soniqsync",
+ 3804: "iqnet-port",
+ 3805: "tcpdataserver",
+ 3806: "wsmlb",
+ 3807: "spugna",
+ 3808: "sun-as-iiops-ca",
+ 3809: "apocd",
+ 3810: "wlanauth",
+ 3811: "amp",
+ 3812: "neto-wol-server",
+ 3813: "rap-ip",
+ 3814: "neto-dcs",
+ 3815: "lansurveyorxml",
+ 3816: "sunlps-http",
+ 3817: "tapeware",
+ 3818: "crinis-hb",
+ 3819: "epl-slp",
+ 3820: "scp",
+ 3821: "pmcp",
+ 3822: "acp-discovery",
+ 3823: "acp-conduit",
+ 3824: "acp-policy",
+ 3825: "ffserver",
+ 3826: "warmux",
+ 3827: "netmpi",
+ 3828: "neteh",
+ 3829: "neteh-ext",
+ 3830: "cernsysmgmtagt",
+ 3831: "dvapps",
+ 3832: "xxnetserver",
+ 3833: "aipn-auth",
+ 3834: "spectardata",
+ 3835: "spectardb",
+ 3836: "markem-dcp",
+ 3837: "mkm-discovery",
+ 3838: "sos",
+ 3839: "amx-rms",
+ 3840: "flirtmitmir",
+ 3842: "nhci",
+ 3843: "quest-agent",
+ 3844: "rnm",
+ 3845: "v-one-spp",
+ 3846: "an-pcp",
+ 3847: "msfw-control",
+ 3848: "item",
+ 3849: "spw-dnspreload",
+ 3850: "qtms-bootstrap",
+ 3851: "spectraport",
+ 3852: "sse-app-config",
+ 3853: "sscan",
+ 3854: "stryker-com",
+ 3855: "opentrac",
+ 3856: "informer",
+ 3857: "trap-port",
+ 3858: "trap-port-mom",
+ 3859: "nav-port",
+ 3860: "sasp",
+ 3861: "winshadow-hd",
+ 3862: "giga-pocket",
+ 3863: "asap-udp",
+ 3865: "xpl",
+ 3866: "dzdaemon",
+ 3867: "dzoglserver",
+ 3869: "ovsam-mgmt",
+ 3870: "ovsam-d-agent",
+ 3871: "avocent-adsap",
+ 3872: "oem-agent",
+ 3873: "fagordnc",
+ 3874: "sixxsconfig",
+ 3875: "pnbscada",
+ 3876: "dl-agent",
+ 3877: "xmpcr-interface",
+ 3878: "fotogcad",
+ 3879: "appss-lm",
+ 3880: "igrs",
+ 3881: "idac",
+ 3882: "msdts1",
+ 3883: "vrpn",
+ 3884: "softrack-meter",
+ 3885: "topflow-ssl",
+ 3886: "nei-management",
+ 3887: "ciphire-data",
+ 3888: "ciphire-serv",
+ 3889: "dandv-tester",
+ 3890: "ndsconnect",
+ 3891: "rtc-pm-port",
+ 3892: "pcc-image-port",
+ 3893: "cgi-starapi",
+ 3894: "syam-agent",
+ 3895: "syam-smc",
+ 3896: "sdo-tls",
+ 3897: "sdo-ssh",
+ 3898: "senip",
+ 3899: "itv-control",
+ 3900: "udt-os",
+ 3901: "nimsh",
+ 3902: "nimaux",
+ 3903: "charsetmgr",
+ 3904: "omnilink-port",
+ 3905: "mupdate",
+ 3906: "topovista-data",
+ 3907: "imoguia-port",
+ 3908: "hppronetman",
+ 3909: "surfcontrolcpa",
+ 3910: "prnrequest",
+ 3911: "prnstatus",
+ 3912: "gbmt-stars",
+ 3913: "listcrt-port",
+ 3914: "listcrt-port-2",
+ 3915: "agcat",
+ 3916: "wysdmc",
+ 3917: "aftmux",
+ 3918: "pktcablemmcops",
+ 3919: "hyperip",
+ 3920: "exasoftport1",
+ 3921: "herodotus-net",
+ 3922: "sor-update",
+ 3923: "symb-sb-port",
+ 3924: "mpl-gprs-port",
+ 3925: "zmp",
+ 3926: "winport",
+ 3927: "natdataservice",
+ 3928: "netboot-pxe",
+ 3929: "smauth-port",
+ 3930: "syam-webserver",
+ 3931: "msr-plugin-port",
+ 3932: "dyn-site",
+ 3933: "plbserve-port",
+ 3934: "sunfm-port",
+ 3935: "sdp-portmapper",
+ 3936: "mailprox",
+ 3937: "dvbservdsc",
+ 3938: "dbcontrol-agent",
+ 3939: "aamp",
+ 3940: "xecp-node",
+ 3941: "homeportal-web",
+ 3942: "srdp",
+ 3943: "tig",
+ 3944: "sops",
+ 3945: "emcads",
+ 3946: "backupedge",
+ 3947: "ccp",
+ 3948: "apdap",
+ 3949: "drip",
+ 3950: "namemunge",
+ 3951: "pwgippfax",
+ 3952: "i3-sessionmgr",
+ 3953: "xmlink-connect",
+ 3954: "adrep",
+ 3955: "p2pcommunity",
+ 3956: "gvcp",
+ 3957: "mqe-broker",
+ 3958: "mqe-agent",
+ 3959: "treehopper",
+ 3960: "bess",
+ 3961: "proaxess",
+ 3962: "sbi-agent",
+ 3963: "thrp",
+ 3964: "sasggprs",
+ 3965: "ati-ip-to-ncpe",
+ 3966: "bflckmgr",
+ 3967: "ppsms",
+ 3968: "ianywhere-dbns",
+ 3969: "landmarks",
+ 3970: "lanrevagent",
+ 3971: "lanrevserver",
+ 3972: "iconp",
+ 3973: "progistics",
+ 3974: "citysearch",
+ 3975: "airshot",
+ 3976: "opswagent",
+ 3977: "opswmanager",
+ 3978: "secure-cfg-svr",
+ 3979: "smwan",
+ 3980: "acms",
+ 3981: "starfish",
+ 3982: "eis",
+ 3983: "eisp",
+ 3984: "mapper-nodemgr",
+ 3985: "mapper-mapethd",
+ 3986: "mapper-ws-ethd",
+ 3987: "centerline",
+ 3988: "dcs-config",
+ 3989: "bv-queryengine",
+ 3990: "bv-is",
+ 3991: "bv-smcsrv",
+ 3992: "bv-ds",
+ 3993: "bv-agent",
+ 3995: "iss-mgmt-ssl",
+ 3996: "abcsoftware",
+ 3997: "agentsease-db",
+ 3998: "dnx",
+ 3999: "nvcnet",
+ 4000: "terabase",
+ 4001: "newoak",
+ 4002: "pxc-spvr-ft",
+ 4003: "pxc-splr-ft",
+ 4004: "pxc-roid",
+ 4005: "pxc-pin",
+ 4006: "pxc-spvr",
+ 4007: "pxc-splr",
+ 4008: "netcheque",
+ 4009: "chimera-hwm",
+ 4010: "samsung-unidex",
+ 4011: "altserviceboot",
+ 4012: "pda-gate",
+ 4013: "acl-manager",
+ 4014: "taiclock",
+ 4015: "talarian-mcast1",
+ 4016: "talarian-mcast2",
+ 4017: "talarian-mcast3",
+ 4018: "talarian-mcast4",
+ 4019: "talarian-mcast5",
+ 4020: "trap",
+ 4021: "nexus-portal",
+ 4022: "dnox",
+ 4023: "esnm-zoning",
+ 4024: "tnp1-port",
+ 4025: "partimage",
+ 4026: "as-debug",
+ 4027: "bxp",
+ 4028: "dtserver-port",
+ 4029: "ip-qsig",
+ 4030: "jdmn-port",
+ 4031: "suucp",
+ 4032: "vrts-auth-port",
+ 4033: "sanavigator",
+ 4034: "ubxd",
+ 4035: "wap-push-http",
+ 4036: "wap-push-https",
+ 4037: "ravehd",
+ 4038: "fazzt-ptp",
+ 4039: "fazzt-admin",
+ 4040: "yo-main",
+ 4041: "houston",
+ 4042: "ldxp",
+ 4043: "nirp",
+ 4044: "ltp",
+ 4045: "npp",
+ 4046: "acp-proto",
+ 4047: "ctp-state",
+ 4049: "wafs",
+ 4050: "cisco-wafs",
+ 4051: "cppdp",
+ 4052: "interact",
+ 4053: "ccu-comm-1",
+ 4054: "ccu-comm-2",
+ 4055: "ccu-comm-3",
+ 4056: "lms",
+ 4057: "wfm",
+ 4058: "kingfisher",
+ 4059: "dlms-cosem",
+ 4060: "dsmeter-iatc",
+ 4061: "ice-location",
+ 4062: "ice-slocation",
+ 4063: "ice-router",
+ 4064: "ice-srouter",
+ 4065: "avanti-cdp",
+ 4066: "pmas",
+ 4067: "idp",
+ 4068: "ipfltbcst",
+ 4069: "minger",
+ 4070: "tripe",
+ 4071: "aibkup",
+ 4072: "zieto-sock",
+ 4073: "iRAPP",
+ 4074: "cequint-cityid",
+ 4075: "perimlan",
+ 4076: "seraph",
+ 4077: "ascomalarm",
+ 4079: "santools",
+ 4080: "lorica-in",
+ 4081: "lorica-in-sec",
+ 4082: "lorica-out",
+ 4083: "lorica-out-sec",
+ 4084: "fortisphere-vm",
+ 4086: "ftsync",
+ 4089: "opencore",
+ 4090: "omasgport",
+ 4091: "ewinstaller",
+ 4092: "ewdgs",
+ 4093: "pvxpluscs",
+ 4094: "sysrqd",
+ 4095: "xtgui",
+ 4096: "bre",
+ 4097: "patrolview",
+ 4098: "drmsfsd",
+ 4099: "dpcp",
+ 4100: "igo-incognito",
+ 4101: "brlp-0",
+ 4102: "brlp-1",
+ 4103: "brlp-2",
+ 4104: "brlp-3",
+ 4105: "shofar",
+ 4106: "synchronite",
+ 4107: "j-ac",
+ 4108: "accel",
+ 4109: "izm",
+ 4110: "g2tag",
+ 4111: "xgrid",
+ 4112: "apple-vpns-rp",
+ 4113: "aipn-reg",
+ 4114: "jomamqmonitor",
+ 4115: "cds",
+ 4116: "smartcard-tls",
+ 4117: "hillrserv",
+ 4118: "netscript",
+ 4119: "assuria-slm",
+ 4121: "e-builder",
+ 4122: "fprams",
+ 4123: "z-wave",
+ 4124: "tigv2",
+ 4125: "opsview-envoy",
+ 4126: "ddrepl",
+ 4127: "unikeypro",
+ 4128: "nufw",
+ 4129: "nuauth",
+ 4130: "fronet",
+ 4131: "stars",
+ 4132: "nuts-dem",
+ 4133: "nuts-bootp",
+ 4134: "nifty-hmi",
+ 4135: "cl-db-attach",
+ 4136: "cl-db-request",
+ 4137: "cl-db-remote",
+ 4138: "nettest",
+ 4139: "thrtx",
+ 4140: "cedros-fds",
+ 4141: "oirtgsvc",
+ 4142: "oidocsvc",
+ 4143: "oidsr",
+ 4145: "vvr-control",
+ 4146: "tgcconnect",
+ 4147: "vrxpservman",
+ 4148: "hhb-handheld",
+ 4149: "agslb",
+ 4150: "PowerAlert-nsa",
+ 4151: "menandmice-noh",
+ 4152: "idig-mux",
+ 4153: "mbl-battd",
+ 4154: "atlinks",
+ 4155: "bzr",
+ 4156: "stat-results",
+ 4157: "stat-scanner",
+ 4158: "stat-cc",
+ 4159: "nss",
+ 4160: "jini-discovery",
+ 4161: "omscontact",
+ 4162: "omstopology",
+ 4163: "silverpeakpeer",
+ 4164: "silverpeakcomm",
+ 4165: "altcp",
+ 4166: "joost",
+ 4167: "ddgn",
+ 4168: "pslicser",
+ 4169: "iadt-disc",
+ 4172: "pcoip",
+ 4173: "mma-discovery",
+ 4174: "sm-disc",
+ 4177: "wello",
+ 4178: "storman",
+ 4179: "MaxumSP",
+ 4180: "httpx",
+ 4181: "macbak",
+ 4182: "pcptcpservice",
+ 4183: "cyborgnet",
+ 4184: "universe-suite",
+ 4185: "wcpp",
+ 4188: "vatata",
+ 4191: "dsmipv6",
+ 4192: "azeti-bd",
+ 4197: "hctl",
+ 4199: "eims-admin",
+ 4300: "corelccam",
+ 4301: "d-data",
+ 4302: "d-data-control",
+ 4303: "srcp",
+ 4304: "owserver",
+ 4305: "batman",
+ 4306: "pinghgl",
+ 4307: "trueconf",
+ 4308: "compx-lockview",
+ 4309: "dserver",
+ 4310: "mirrtex",
+ 4320: "fdt-rcatp",
+ 4321: "rwhois",
+ 4322: "trim-event",
+ 4323: "trim-ice",
+ 4325: "geognosisman",
+ 4326: "geognosis",
+ 4327: "jaxer-web",
+ 4328: "jaxer-manager",
+ 4333: "ahsp",
+ 4340: "gaia",
+ 4341: "lisp-data",
+ 4342: "lisp-control",
+ 4343: "unicall",
+ 4344: "vinainstall",
+ 4345: "m4-network-as",
+ 4346: "elanlm",
+ 4347: "lansurveyor",
+ 4348: "itose",
+ 4349: "fsportmap",
+ 4350: "net-device",
+ 4351: "plcy-net-svcs",
+ 4352: "pjlink",
+ 4353: "f5-iquery",
+ 4354: "qsnet-trans",
+ 4355: "qsnet-workst",
+ 4356: "qsnet-assist",
+ 4357: "qsnet-cond",
+ 4358: "qsnet-nucl",
+ 4359: "omabcastltkm",
+ 4361: "nacnl",
+ 4362: "afore-vdp-disc",
+ 4366: "shadowstream",
+ 4368: "wxbrief",
+ 4369: "epmd",
+ 4370: "elpro-tunnel",
+ 4371: "l2c-disc",
+ 4372: "l2c-data",
+ 4373: "remctl",
+ 4375: "tolteces",
+ 4376: "bip",
+ 4377: "cp-spxsvr",
+ 4378: "cp-spxdpy",
+ 4379: "ctdb",
+ 4389: "xandros-cms",
+ 4390: "wiegand",
+ 4394: "apwi-disc",
+ 4395: "omnivisionesx",
+ 4400: "ds-srv",
+ 4401: "ds-srvr",
+ 4402: "ds-clnt",
+ 4403: "ds-user",
+ 4404: "ds-admin",
+ 4405: "ds-mail",
+ 4406: "ds-slp",
+ 4412: "smallchat",
+ 4413: "avi-nms-disc",
+ 4416: "pjj-player-disc",
+ 4418: "axysbridge",
+ 4420: "nvm-express",
+ 4425: "netrockey6",
+ 4426: "beacon-port-2",
+ 4430: "rsqlserver",
+ 4432: "l-acoustics",
+ 4441: "netblox",
+ 4442: "saris",
+ 4443: "pharos",
+ 4444: "krb524",
+ 4445: "upnotifyp",
+ 4446: "n1-fwp",
+ 4447: "n1-rmgmt",
+ 4448: "asc-slmd",
+ 4449: "privatewire",
+ 4450: "camp",
+ 4451: "ctisystemmsg",
+ 4452: "ctiprogramload",
+ 4453: "nssalertmgr",
+ 4454: "nssagentmgr",
+ 4455: "prchat-user",
+ 4456: "prchat-server",
+ 4457: "prRegister",
+ 4458: "mcp",
+ 4484: "hpssmgmt",
+ 4486: "icms",
+ 4488: "awacs-ice",
+ 4500: "ipsec-nat-t",
+ 4534: "armagetronad",
+ 4535: "ehs",
+ 4536: "ehs-ssl",
+ 4537: "wssauthsvc",
+ 4538: "swx-gate",
+ 4545: "worldscores",
+ 4546: "sf-lm",
+ 4547: "lanner-lm",
+ 4548: "synchromesh",
+ 4549: "aegate",
+ 4550: "gds-adppiw-db",
+ 4551: "ieee-mih",
+ 4552: "menandmice-mon",
+ 4554: "msfrs",
+ 4555: "rsip",
+ 4556: "dtn-bundle",
+ 4557: "mtcevrunqss",
+ 4558: "mtcevrunqman",
+ 4559: "hylafax",
+ 4566: "kwtc",
+ 4567: "tram",
+ 4568: "bmc-reporting",
+ 4569: "iax",
+ 4591: "l3t-at-an",
+ 4592: "hrpd-ith-at-an",
+ 4593: "ipt-anri-anri",
+ 4594: "ias-session",
+ 4595: "ias-paging",
+ 4596: "ias-neighbor",
+ 4597: "a21-an-1xbs",
+ 4598: "a16-an-an",
+ 4599: "a17-an-an",
+ 4600: "piranha1",
+ 4601: "piranha2",
+ 4621: "ventoso",
+ 4658: "playsta2-app",
+ 4659: "playsta2-lob",
+ 4660: "smaclmgr",
+ 4661: "kar2ouche",
+ 4662: "oms",
+ 4663: "noteit",
+ 4664: "ems",
+ 4665: "contclientms",
+ 4666: "eportcomm",
+ 4667: "mmacomm",
+ 4668: "mmaeds",
+ 4669: "eportcommdata",
+ 4670: "light",
+ 4671: "acter",
+ 4672: "rfa",
+ 4673: "cxws",
+ 4674: "appiq-mgmt",
+ 4675: "dhct-status",
+ 4676: "dhct-alerts",
+ 4677: "bcs",
+ 4678: "traversal",
+ 4679: "mgesupervision",
+ 4680: "mgemanagement",
+ 4681: "parliant",
+ 4682: "finisar",
+ 4683: "spike",
+ 4684: "rfid-rp1",
+ 4685: "autopac",
+ 4686: "msp-os",
+ 4687: "nst",
+ 4688: "mobile-p2p",
+ 4689: "altovacentral",
+ 4690: "prelude",
+ 4691: "mtn",
+ 4692: "conspiracy",
+ 4700: "netxms-agent",
+ 4701: "netxms-mgmt",
+ 4702: "netxms-sync",
+ 4711: "trinity-dist",
+ 4725: "truckstar",
+ 4726: "a26-fap-fgw",
+ 4727: "fcis-disc",
+ 4728: "capmux",
+ 4729: "gsmtap",
+ 4730: "gearman",
+ 4732: "ohmtrigger",
+ 4737: "ipdr-sp",
+ 4738: "solera-lpn",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 4741: "lumimgrd",
+ 4742: "sicct-sdp",
+ 4743: "openhpid",
+ 4744: "ifsp",
+ 4745: "fmp",
+ 4746: "intelliadm-disc",
+ 4747: "buschtrommel",
+ 4749: "profilemac",
+ 4750: "ssad",
+ 4751: "spocp",
+ 4752: "snap",
+ 4753: "simon-disc",
+ 4754: "gre-in-udp",
+ 4755: "gre-udp-dtls",
+ 4784: "bfd-multi-ctl",
+ 4785: "cncp",
+ 4789: "vxlan",
+ 4790: "vxlan-gpe",
+ 4791: "roce",
+ 4800: "iims",
+ 4801: "iwec",
+ 4802: "ilss",
+ 4803: "notateit-disc",
+ 4804: "aja-ntv4-disc",
+ 4827: "htcp",
+ 4837: "varadero-0",
+ 4838: "varadero-1",
+ 4839: "varadero-2",
+ 4840: "opcua-udp",
+ 4841: "quosa",
+ 4842: "gw-asv",
+ 4843: "opcua-tls",
+ 4844: "gw-log",
+ 4845: "wcr-remlib",
+ 4846: "contamac-icm",
+ 4847: "wfc",
+ 4848: "appserv-http",
+ 4849: "appserv-https",
+ 4850: "sun-as-nodeagt",
+ 4851: "derby-repli",
+ 4867: "unify-debug",
+ 4868: "phrelay",
+ 4869: "phrelaydbg",
+ 4870: "cc-tracking",
+ 4871: "wired",
+ 4876: "tritium-can",
+ 4877: "lmcs",
+ 4878: "inst-discovery",
+ 4881: "socp-t",
+ 4882: "socp-c",
+ 4884: "hivestor",
+ 4885: "abbs",
+ 4894: "lyskom",
+ 4899: "radmin-port",
+ 4900: "hfcs",
+ 4914: "bones",
+ 4936: "an-signaling",
+ 4937: "atsc-mh-ssc",
+ 4940: "eq-office-4940",
+ 4941: "eq-office-4941",
+ 4942: "eq-office-4942",
+ 4949: "munin",
+ 4950: "sybasesrvmon",
+ 4951: "pwgwims",
+ 4952: "sagxtsds",
+ 4969: "ccss-qmm",
+ 4970: "ccss-qsm",
+ 4980: "ctxs-vpp",
+ 4986: "mrip",
+ 4987: "smar-se-port1",
+ 4988: "smar-se-port2",
+ 4989: "parallel",
+ 4990: "busycal",
+ 4991: "vrt",
+ 4999: "hfcs-manager",
+ 5000: "commplex-main",
+ 5001: "commplex-link",
+ 5002: "rfe",
+ 5003: "fmpro-internal",
+ 5004: "avt-profile-1",
+ 5005: "avt-profile-2",
+ 5006: "wsm-server",
+ 5007: "wsm-server-ssl",
+ 5008: "synapsis-edge",
+ 5009: "winfs",
+ 5010: "telelpathstart",
+ 5011: "telelpathattack",
+ 5012: "nsp",
+ 5013: "fmpro-v6",
+ 5014: "onpsocket",
+ 5020: "zenginkyo-1",
+ 5021: "zenginkyo-2",
+ 5022: "mice",
+ 5023: "htuilsrv",
+ 5024: "scpi-telnet",
+ 5025: "scpi-raw",
+ 5026: "strexec-d",
+ 5027: "strexec-s",
+ 5029: "infobright",
+ 5030: "surfpass",
+ 5031: "dmp",
+ 5042: "asnaacceler8db",
+ 5043: "swxadmin",
+ 5044: "lxi-evntsvc",
+ 5046: "vpm-udp",
+ 5047: "iscape",
+ 5049: "ivocalize",
+ 5050: "mmcc",
+ 5051: "ita-agent",
+ 5052: "ita-manager",
+ 5053: "rlm-disc",
+ 5055: "unot",
+ 5056: "intecom-ps1",
+ 5057: "intecom-ps2",
+ 5058: "locus-disc",
+ 5059: "sds",
+ 5060: "sip",
+ 5061: "sips",
+ 5062: "na-localise",
+ 5064: "ca-1",
+ 5065: "ca-2",
+ 5066: "stanag-5066",
+ 5067: "authentx",
+ 5069: "i-net-2000-npr",
+ 5070: "vtsas",
+ 5071: "powerschool",
+ 5072: "ayiya",
+ 5073: "tag-pm",
+ 5074: "alesquery",
+ 5078: "pixelpusher",
+ 5079: "cp-spxrpts",
+ 5080: "onscreen",
+ 5081: "sdl-ets",
+ 5082: "qcp",
+ 5083: "qfp",
+ 5084: "llrp",
+ 5085: "encrypted-llrp",
+ 5092: "magpie",
+ 5093: "sentinel-lm",
+ 5094: "hart-ip",
+ 5099: "sentlm-srv2srv",
+ 5100: "socalia",
+ 5101: "talarian-udp",
+ 5102: "oms-nonsecure",
+ 5104: "tinymessage",
+ 5105: "hughes-ap",
+ 5111: "taep-as-svc",
+ 5112: "pm-cmdsvr",
+ 5116: "emb-proj-cmd",
+ 5120: "barracuda-bbs",
+ 5133: "nbt-pc",
+ 5136: "minotaur-sa",
+ 5137: "ctsd",
+ 5145: "rmonitor-secure",
+ 5150: "atmp",
+ 5151: "esri-sde",
+ 5152: "sde-discovery",
+ 5154: "bzflag",
+ 5155: "asctrl-agent",
+ 5164: "vpa-disc",
+ 5165: "ife-icorp",
+ 5166: "winpcs",
+ 5167: "scte104",
+ 5168: "scte30",
+ 5190: "aol",
+ 5191: "aol-1",
+ 5192: "aol-2",
+ 5193: "aol-3",
+ 5200: "targus-getdata",
+ 5201: "targus-getdata1",
+ 5202: "targus-getdata2",
+ 5203: "targus-getdata3",
+ 5223: "hpvirtgrp",
+ 5224: "hpvirtctrl",
+ 5225: "hp-server",
+ 5226: "hp-status",
+ 5227: "perfd",
+ 5234: "eenet",
+ 5235: "galaxy-network",
+ 5236: "padl2sim",
+ 5237: "mnet-discovery",
+ 5245: "downtools-disc",
+ 5246: "capwap-control",
+ 5247: "capwap-data",
+ 5248: "caacws",
+ 5249: "caaclang2",
+ 5250: "soagateway",
+ 5251: "caevms",
+ 5252: "movaz-ssc",
+ 5264: "3com-njack-1",
+ 5265: "3com-njack-2",
+ 5270: "cartographerxmp",
+ 5271: "cuelink-disc",
+ 5272: "pk",
+ 5282: "transmit-port",
+ 5298: "presence",
+ 5299: "nlg-data",
+ 5300: "hacl-hb",
+ 5301: "hacl-gs",
+ 5302: "hacl-cfg",
+ 5303: "hacl-probe",
+ 5304: "hacl-local",
+ 5305: "hacl-test",
+ 5306: "sun-mc-grp",
+ 5307: "sco-aip",
+ 5308: "cfengine",
+ 5309: "jprinter",
+ 5310: "outlaws",
+ 5312: "permabit-cs",
+ 5313: "rrdp",
+ 5314: "opalis-rbt-ipc",
+ 5315: "hacl-poll",
+ 5343: "kfserver",
+ 5344: "xkotodrcp",
+ 5349: "stuns",
+ 5350: "pcp-multicast",
+ 5351: "pcp",
+ 5352: "dns-llq",
+ 5353: "mdns",
+ 5354: "mdnsresponder",
+ 5355: "llmnr",
+ 5356: "ms-smlbiz",
+ 5357: "wsdapi",
+ 5358: "wsdapi-s",
+ 5359: "ms-alerter",
+ 5360: "ms-sideshow",
+ 5361: "ms-s-sideshow",
+ 5362: "serverwsd2",
+ 5363: "net-projection",
+ 5364: "kdnet",
+ 5397: "stresstester",
+ 5398: "elektron-admin",
+ 5399: "securitychase",
+ 5400: "excerpt",
+ 5401: "excerpts",
+ 5402: "mftp",
+ 5403: "hpoms-ci-lstn",
+ 5404: "hpoms-dps-lstn",
+ 5405: "netsupport",
+ 5406: "systemics-sox",
+ 5407: "foresyte-clear",
+ 5408: "foresyte-sec",
+ 5409: "salient-dtasrv",
+ 5410: "salient-usrmgr",
+ 5411: "actnet",
+ 5412: "continuus",
+ 5413: "wwiotalk",
+ 5414: "statusd",
+ 5415: "ns-server",
+ 5416: "sns-gateway",
+ 5417: "sns-agent",
+ 5418: "mcntp",
+ 5419: "dj-ice",
+ 5420: "cylink-c",
+ 5421: "netsupport2",
+ 5422: "salient-mux",
+ 5423: "virtualuser",
+ 5424: "beyond-remote",
+ 5425: "br-channel",
+ 5426: "devbasic",
+ 5427: "sco-peer-tta",
+ 5428: "telaconsole",
+ 5429: "base",
+ 5430: "radec-corp",
+ 5431: "park-agent",
+ 5432: "postgresql",
+ 5433: "pyrrho",
+ 5434: "sgi-arrayd",
+ 5435: "sceanics",
+ 5436: "pmip6-cntl",
+ 5437: "pmip6-data",
+ 5443: "spss",
+ 5450: "tiepie-disc",
+ 5453: "surebox",
+ 5454: "apc-5454",
+ 5455: "apc-5455",
+ 5456: "apc-5456",
+ 5461: "silkmeter",
+ 5462: "ttl-publisher",
+ 5463: "ttlpriceproxy",
+ 5464: "quailnet",
+ 5465: "netops-broker",
+ 5474: "apsolab-rpc",
+ 5500: "fcp-addr-srvr1",
+ 5501: "fcp-addr-srvr2",
+ 5502: "fcp-srvr-inst1",
+ 5503: "fcp-srvr-inst2",
+ 5504: "fcp-cics-gw1",
+ 5505: "checkoutdb",
+ 5506: "amc",
+ 5553: "sgi-eventmond",
+ 5554: "sgi-esphttp",
+ 5555: "personal-agent",
+ 5556: "freeciv",
+ 5567: "dof-dps-mc-sec",
+ 5568: "sdt",
+ 5569: "rdmnet-device",
+ 5573: "sdmmp",
+ 5580: "tmosms0",
+ 5581: "tmosms1",
+ 5582: "fac-restore",
+ 5583: "tmo-icon-sync",
+ 5584: "bis-web",
+ 5585: "bis-sync",
+ 5597: "ininmessaging",
+ 5598: "mctfeed",
+ 5599: "esinstall",
+ 5600: "esmmanager",
+ 5601: "esmagent",
+ 5602: "a1-msc",
+ 5603: "a1-bs",
+ 5604: "a3-sdunode",
+ 5605: "a4-sdunode",
+ 5627: "ninaf",
+ 5628: "htrust",
+ 5629: "symantec-sfdb",
+ 5630: "precise-comm",
+ 5631: "pcanywheredata",
+ 5632: "pcanywherestat",
+ 5633: "beorl",
+ 5634: "xprtld",
+ 5670: "zre-disc",
+ 5671: "amqps",
+ 5672: "amqp",
+ 5673: "jms",
+ 5674: "hyperscsi-port",
+ 5675: "v5ua",
+ 5676: "raadmin",
+ 5677: "questdb2-lnchr",
+ 5678: "rrac",
+ 5679: "dccm",
+ 5680: "auriga-router",
+ 5681: "ncxcp",
+ 5682: "brightcore",
+ 5683: "coap",
+ 5684: "coaps",
+ 5687: "gog-multiplayer",
+ 5688: "ggz",
+ 5689: "qmvideo",
+ 5713: "proshareaudio",
+ 5714: "prosharevideo",
+ 5715: "prosharedata",
+ 5716: "prosharerequest",
+ 5717: "prosharenotify",
+ 5718: "dpm",
+ 5719: "dpm-agent",
+ 5720: "ms-licensing",
+ 5721: "dtpt",
+ 5722: "msdfsr",
+ 5723: "omhs",
+ 5724: "omsdk",
+ 5728: "io-dist-group",
+ 5729: "openmail",
+ 5730: "unieng",
+ 5741: "ida-discover1",
+ 5742: "ida-discover2",
+ 5743: "watchdoc-pod",
+ 5744: "watchdoc",
+ 5745: "fcopy-server",
+ 5746: "fcopys-server",
+ 5747: "tunatic",
+ 5748: "tunalyzer",
+ 5750: "rscd",
+ 5755: "openmailg",
+ 5757: "x500ms",
+ 5766: "openmailns",
+ 5767: "s-openmail",
+ 5768: "openmailpxy",
+ 5769: "spramsca",
+ 5770: "spramsd",
+ 5771: "netagent",
+ 5777: "dali-port",
+ 5781: "3par-evts",
+ 5782: "3par-mgmt",
+ 5783: "3par-mgmt-ssl",
+ 5784: "ibar",
+ 5785: "3par-rcopy",
+ 5786: "cisco-redu",
+ 5787: "waascluster",
+ 5793: "xtreamx",
+ 5794: "spdp",
+ 5813: "icmpd",
+ 5814: "spt-automation",
+ 5859: "wherehoo",
+ 5863: "ppsuitemsg",
+ 5900: "rfb",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 5963: "indy",
+ 5968: "mppolicy-v5",
+ 5969: "mppolicy-mgr",
+ 5984: "couchdb",
+ 5985: "wsman",
+ 5986: "wsmans",
+ 5987: "wbem-rmi",
+ 5988: "wbem-http",
+ 5989: "wbem-https",
+ 5990: "wbem-exp-https",
+ 5991: "nuxsl",
+ 5992: "consul-insight",
+ 5999: "cvsup",
+ 6064: "ndl-ahp-svc",
+ 6065: "winpharaoh",
+ 6066: "ewctsp",
+ 6069: "trip",
+ 6070: "messageasap",
+ 6071: "ssdtp",
+ 6072: "diagnose-proc",
+ 6073: "directplay8",
+ 6074: "max",
+ 6080: "gue",
+ 6081: "geneve",
+ 6082: "p25cai",
+ 6083: "miami-bcast",
+ 6085: "konspire2b",
+ 6086: "pdtp",
+ 6087: "ldss",
+ 6088: "doglms-notify",
+ 6100: "synchronet-db",
+ 6101: "synchronet-rtc",
+ 6102: "synchronet-upd",
+ 6103: "rets",
+ 6104: "dbdb",
+ 6105: "primaserver",
+ 6106: "mpsserver",
+ 6107: "etc-control",
+ 6108: "sercomm-scadmin",
+ 6109: "globecast-id",
+ 6110: "softcm",
+ 6111: "spc",
+ 6112: "dtspcd",
+ 6118: "tipc",
+ 6122: "bex-webadmin",
+ 6123: "backup-express",
+ 6124: "pnbs",
+ 6133: "nbt-wol",
+ 6140: "pulsonixnls",
+ 6141: "meta-corp",
+ 6142: "aspentec-lm",
+ 6143: "watershed-lm",
+ 6144: "statsci1-lm",
+ 6145: "statsci2-lm",
+ 6146: "lonewolf-lm",
+ 6147: "montage-lm",
+ 6148: "ricardo-lm",
+ 6149: "tal-pod",
+ 6160: "ecmp-data",
+ 6161: "patrol-ism",
+ 6162: "patrol-coll",
+ 6163: "pscribe",
+ 6200: "lm-x",
+ 6201: "thermo-calc",
+ 6209: "qmtps",
+ 6222: "radmind",
+ 6241: "jeol-nsddp-1",
+ 6242: "jeol-nsddp-2",
+ 6243: "jeol-nsddp-3",
+ 6244: "jeol-nsddp-4",
+ 6251: "tl1-raw-ssl",
+ 6252: "tl1-ssh",
+ 6253: "crip",
+ 6268: "grid",
+ 6269: "grid-alt",
+ 6300: "bmc-grx",
+ 6301: "bmc-ctd-ldap",
+ 6306: "ufmp",
+ 6315: "scup-disc",
+ 6316: "abb-escp",
+ 6317: "nav-data",
+ 6320: "repsvc",
+ 6321: "emp-server1",
+ 6322: "emp-server2",
+ 6324: "hrd-ns-disc",
+ 6343: "sflow",
+ 6346: "gnutella-svc",
+ 6347: "gnutella-rtr",
+ 6350: "adap",
+ 6355: "pmcs",
+ 6360: "metaedit-mu",
+ 6363: "ndn",
+ 6370: "metaedit-se",
+ 6382: "metatude-mds",
+ 6389: "clariion-evr01",
+ 6390: "metaedit-ws",
+ 6417: "faxcomservice",
+ 6419: "svdrp-disc",
+ 6420: "nim-vdrshell",
+ 6421: "nim-wan",
+ 6443: "sun-sr-https",
+ 6444: "sge-qmaster",
+ 6445: "sge-execd",
+ 6446: "mysql-proxy",
+ 6455: "skip-cert-recv",
+ 6456: "skip-cert-send",
+ 6464: "ieee11073-20701",
+ 6471: "lvision-lm",
+ 6480: "sun-sr-http",
+ 6481: "servicetags",
+ 6482: "ldoms-mgmt",
+ 6483: "SunVTS-RMI",
+ 6484: "sun-sr-jms",
+ 6485: "sun-sr-iiop",
+ 6486: "sun-sr-iiops",
+ 6487: "sun-sr-iiop-aut",
+ 6488: "sun-sr-jmx",
+ 6489: "sun-sr-admin",
+ 6500: "boks",
+ 6501: "boks-servc",
+ 6502: "boks-servm",
+ 6503: "boks-clntd",
+ 6505: "badm-priv",
+ 6506: "badm-pub",
+ 6507: "bdir-priv",
+ 6508: "bdir-pub",
+ 6509: "mgcs-mfp-port",
+ 6510: "mcer-port",
+ 6511: "dccp-udp",
+ 6514: "syslog-tls",
+ 6515: "elipse-rec",
+ 6543: "lds-distrib",
+ 6544: "lds-dump",
+ 6547: "apc-6547",
+ 6548: "apc-6548",
+ 6549: "apc-6549",
+ 6550: "fg-sysupdate",
+ 6551: "sum",
+ 6558: "xdsxdm",
+ 6566: "sane-port",
+ 6568: "rp-reputation",
+ 6579: "affiliate",
+ 6580: "parsec-master",
+ 6581: "parsec-peer",
+ 6582: "parsec-game",
+ 6583: "joaJewelSuite",
+ 6619: "odette-ftps",
+ 6620: "kftp-data",
+ 6621: "kftp",
+ 6622: "mcftp",
+ 6623: "ktelnet",
+ 6626: "wago-service",
+ 6627: "nexgen",
+ 6628: "afesc-mc",
+ 6629: "nexgen-aux",
+ 6633: "cisco-vpath-tun",
+ 6634: "mpls-pm",
+ 6635: "mpls-udp",
+ 6636: "mpls-udp-dtls",
+ 6653: "openflow",
+ 6657: "palcom-disc",
+ 6670: "vocaltec-gold",
+ 6671: "p4p-portal",
+ 6672: "vision-server",
+ 6673: "vision-elmd",
+ 6678: "vfbp-disc",
+ 6679: "osaut",
+ 6689: "tsa",
+ 6696: "babel",
+ 6701: "kti-icad-srvr",
+ 6702: "e-design-net",
+ 6703: "e-design-web",
+ 6714: "ibprotocol",
+ 6715: "fibotrader-com",
+ 6767: "bmc-perf-agent",
+ 6768: "bmc-perf-mgrd",
+ 6769: "adi-gxp-srvprt",
+ 6770: "plysrv-http",
+ 6771: "plysrv-https",
+ 6784: "bfd-lag",
+ 6785: "dgpf-exchg",
+ 6786: "smc-jmx",
+ 6787: "smc-admin",
+ 6788: "smc-http",
+ 6790: "hnmp",
+ 6791: "hnm",
+ 6801: "acnet",
+ 6831: "ambit-lm",
+ 6841: "netmo-default",
+ 6842: "netmo-http",
+ 6850: "iccrushmore",
+ 6868: "acctopus-st",
+ 6888: "muse",
+ 6935: "ethoscan",
+ 6936: "xsmsvc",
+ 6946: "bioserver",
+ 6951: "otlp",
+ 6961: "jmact3",
+ 6962: "jmevt2",
+ 6963: "swismgr1",
+ 6964: "swismgr2",
+ 6965: "swistrap",
+ 6966: "swispol",
+ 6969: "acmsoda",
+ 6997: "MobilitySrv",
+ 6998: "iatp-highpri",
+ 6999: "iatp-normalpri",
+ 7000: "afs3-fileserver",
+ 7001: "afs3-callback",
+ 7002: "afs3-prserver",
+ 7003: "afs3-vlserver",
+ 7004: "afs3-kaserver",
+ 7005: "afs3-volser",
+ 7006: "afs3-errors",
+ 7007: "afs3-bos",
+ 7008: "afs3-update",
+ 7009: "afs3-rmtsys",
+ 7010: "ups-onlinet",
+ 7011: "talon-disc",
+ 7012: "talon-engine",
+ 7013: "microtalon-dis",
+ 7014: "microtalon-com",
+ 7015: "talon-webserver",
+ 7016: "spg",
+ 7017: "grasp",
+ 7019: "doceri-view",
+ 7020: "dpserve",
+ 7021: "dpserveadmin",
+ 7022: "ctdp",
+ 7023: "ct2nmcs",
+ 7024: "vmsvc",
+ 7025: "vmsvc-2",
+ 7030: "op-probe",
+ 7040: "quest-disc",
+ 7070: "arcp",
+ 7071: "iwg1",
+ 7080: "empowerid",
+ 7088: "zixi-transport",
+ 7095: "jdp-disc",
+ 7099: "lazy-ptop",
+ 7100: "font-service",
+ 7101: "elcn",
+ 7107: "aes-x170",
+ 7121: "virprot-lm",
+ 7128: "scenidm",
+ 7129: "scenccs",
+ 7161: "cabsm-comm",
+ 7162: "caistoragemgr",
+ 7163: "cacsambroker",
+ 7164: "fsr",
+ 7165: "doc-server",
+ 7166: "aruba-server",
+ 7169: "ccag-pib",
+ 7170: "nsrp",
+ 7171: "drm-production",
+ 7174: "clutild",
+ 7181: "janus-disc",
+ 7200: "fodms",
+ 7201: "dlip",
+ 7227: "ramp",
+ 7235: "aspcoordination",
+ 7244: "frc-hicp-disc",
+ 7262: "cnap",
+ 7272: "watchme-7272",
+ 7273: "oma-rlp",
+ 7274: "oma-rlp-s",
+ 7275: "oma-ulp",
+ 7276: "oma-ilp",
+ 7277: "oma-ilp-s",
+ 7278: "oma-dcdocbs",
+ 7279: "ctxlic",
+ 7280: "itactionserver1",
+ 7281: "itactionserver2",
+ 7282: "mzca-alert",
+ 7365: "lcm-server",
+ 7391: "mindfilesys",
+ 7392: "mrssrendezvous",
+ 7393: "nfoldman",
+ 7394: "fse",
+ 7395: "winqedit",
+ 7397: "hexarc",
+ 7400: "rtps-discovery",
+ 7401: "rtps-dd-ut",
+ 7402: "rtps-dd-mt",
+ 7410: "ionixnetmon",
+ 7411: "daqstream",
+ 7421: "mtportmon",
+ 7426: "pmdmgr",
+ 7427: "oveadmgr",
+ 7428: "ovladmgr",
+ 7429: "opi-sock",
+ 7430: "xmpv7",
+ 7431: "pmd",
+ 7437: "faximum",
+ 7443: "oracleas-https",
+ 7473: "rise",
+ 7491: "telops-lmd",
+ 7500: "silhouette",
+ 7501: "ovbus",
+ 7510: "ovhpas",
+ 7511: "pafec-lm",
+ 7542: "saratoga",
+ 7543: "atul",
+ 7544: "nta-ds",
+ 7545: "nta-us",
+ 7546: "cfs",
+ 7547: "cwmp",
+ 7548: "tidp",
+ 7549: "nls-tl",
+ 7550: "cloudsignaling",
+ 7560: "sncp",
+ 7566: "vsi-omega",
+ 7570: "aries-kfinder",
+ 7574: "coherence-disc",
+ 7588: "sun-lm",
+ 7606: "mipi-debug",
+ 7624: "indi",
+ 7627: "soap-http",
+ 7628: "zen-pawn",
+ 7629: "xdas",
+ 7633: "pmdfmgt",
+ 7648: "cuseeme",
+ 7674: "imqtunnels",
+ 7675: "imqtunnel",
+ 7676: "imqbrokerd",
+ 7677: "sun-user-https",
+ 7680: "pando-pub",
+ 7689: "collaber",
+ 7697: "klio",
+ 7707: "sync-em7",
+ 7708: "scinet",
+ 7720: "medimageportal",
+ 7724: "nsdeepfreezectl",
+ 7725: "nitrogen",
+ 7726: "freezexservice",
+ 7727: "trident-data",
+ 7728: "osvr",
+ 7734: "smip",
+ 7738: "aiagent",
+ 7741: "scriptview",
+ 7743: "sstp-1",
+ 7744: "raqmon-pdu",
+ 7747: "prgp",
+ 7777: "cbt",
+ 7778: "interwise",
+ 7779: "vstat",
+ 7781: "accu-lmgr",
+ 7784: "s-bfd",
+ 7786: "minivend",
+ 7787: "popup-reminders",
+ 7789: "office-tools",
+ 7794: "q3ade",
+ 7797: "pnet-conn",
+ 7798: "pnet-enc",
+ 7799: "altbsdp",
+ 7800: "asr",
+ 7801: "ssp-client",
+ 7802: "vns-tp",
+ 7810: "rbt-wanopt",
+ 7845: "apc-7845",
+ 7846: "apc-7846",
+ 7872: "mipv6tls",
+ 7880: "pss",
+ 7887: "ubroker",
+ 7900: "mevent",
+ 7901: "tnos-sp",
+ 7902: "tnos-dp",
+ 7903: "tnos-dps",
+ 7913: "qo-secure",
+ 7932: "t2-drm",
+ 7933: "t2-brm",
+ 7962: "generalsync",
+ 7967: "supercell",
+ 7979: "micromuse-ncps",
+ 7980: "quest-vista",
+ 7982: "sossd-disc",
+ 7998: "usicontentpush",
+ 7999: "irdmi2",
+ 8000: "irdmi",
+ 8001: "vcom-tunnel",
+ 8002: "teradataordbms",
+ 8003: "mcreport",
+ 8005: "mxi",
+ 8006: "wpl-disc",
+ 8007: "warppipe",
+ 8008: "http-alt",
+ 8019: "qbdb",
+ 8020: "intu-ec-svcdisc",
+ 8021: "intu-ec-client",
+ 8022: "oa-system",
+ 8025: "ca-audit-da",
+ 8026: "ca-audit-ds",
+ 8032: "pro-ed",
+ 8033: "mindprint",
+ 8034: "vantronix-mgmt",
+ 8040: "ampify",
+ 8041: "enguity-xccetp",
+ 8052: "senomix01",
+ 8053: "senomix02",
+ 8054: "senomix03",
+ 8055: "senomix04",
+ 8056: "senomix05",
+ 8057: "senomix06",
+ 8058: "senomix07",
+ 8059: "senomix08",
+ 8060: "aero",
+ 8074: "gadugadu",
+ 8080: "http-alt",
+ 8081: "sunproxyadmin",
+ 8082: "us-cli",
+ 8083: "us-srv",
+ 8086: "d-s-n",
+ 8087: "simplifymedia",
+ 8088: "radan-http",
+ 8097: "sac",
+ 8100: "xprint-server",
+ 8115: "mtl8000-matrix",
+ 8116: "cp-cluster",
+ 8118: "privoxy",
+ 8121: "apollo-data",
+ 8122: "apollo-admin",
+ 8128: "paycash-online",
+ 8129: "paycash-wbp",
+ 8130: "indigo-vrmi",
+ 8131: "indigo-vbcp",
+ 8132: "dbabble",
+ 8148: "isdd",
+ 8149: "eor-game",
+ 8160: "patrol",
+ 8161: "patrol-snmp",
+ 8182: "vmware-fdm",
+ 8184: "itach",
+ 8192: "spytechphone",
+ 8194: "blp1",
+ 8195: "blp2",
+ 8199: "vvr-data",
+ 8200: "trivnet1",
+ 8201: "trivnet2",
+ 8202: "aesop",
+ 8204: "lm-perfworks",
+ 8205: "lm-instmgr",
+ 8206: "lm-dta",
+ 8207: "lm-sserver",
+ 8208: "lm-webwatcher",
+ 8230: "rexecj",
+ 8231: "hncp-udp-port",
+ 8232: "hncp-dtls-port",
+ 8243: "synapse-nhttps",
+ 8276: "pando-sec",
+ 8280: "synapse-nhttp",
+ 8282: "libelle-disc",
+ 8292: "blp3",
+ 8294: "blp4",
+ 8300: "tmi",
+ 8301: "amberon",
+ 8320: "tnp-discover",
+ 8321: "tnp",
+ 8322: "garmin-marine",
+ 8351: "server-find",
+ 8376: "cruise-enum",
+ 8377: "cruise-swroute",
+ 8378: "cruise-config",
+ 8379: "cruise-diags",
+ 8380: "cruise-update",
+ 8383: "m2mservices",
+ 8384: "marathontp",
+ 8400: "cvd",
+ 8401: "sabarsd",
+ 8402: "abarsd",
+ 8403: "admind",
+ 8416: "espeech",
+ 8417: "espeech-rtp",
+ 8442: "cybro-a-bus",
+ 8443: "pcsync-https",
+ 8444: "pcsync-http",
+ 8445: "copy-disc",
+ 8450: "npmp",
+ 8472: "otv",
+ 8473: "vp2p",
+ 8474: "noteshare",
+ 8500: "fmtp",
+ 8501: "cmtp-av",
+ 8503: "lsp-self-ping",
+ 8554: "rtsp-alt",
+ 8555: "d-fence",
+ 8567: "dof-tunnel",
+ 8600: "asterix",
+ 8609: "canon-cpp-disc",
+ 8610: "canon-mfnp",
+ 8611: "canon-bjnp1",
+ 8612: "canon-bjnp2",
+ 8613: "canon-bjnp3",
+ 8614: "canon-bjnp4",
+ 8675: "msi-cps-rm-disc",
+ 8686: "sun-as-jmxrmi",
+ 8732: "dtp-net",
+ 8733: "ibus",
+ 8763: "mc-appserver",
+ 8764: "openqueue",
+ 8765: "ultraseek-http",
+ 8766: "amcs",
+ 8770: "dpap",
+ 8786: "msgclnt",
+ 8787: "msgsrvr",
+ 8793: "acd-pm",
+ 8800: "sunwebadmin",
+ 8804: "truecm",
+ 8805: "pfcp",
+ 8808: "ssports-bcast",
+ 8873: "dxspider",
+ 8880: "cddbp-alt",
+ 8883: "secure-mqtt",
+ 8888: "ddi-udp-1",
+ 8889: "ddi-udp-2",
+ 8890: "ddi-udp-3",
+ 8891: "ddi-udp-4",
+ 8892: "ddi-udp-5",
+ 8893: "ddi-udp-6",
+ 8894: "ddi-udp-7",
+ 8899: "ospf-lite",
+ 8900: "jmb-cds1",
+ 8901: "jmb-cds2",
+ 8910: "manyone-http",
+ 8911: "manyone-xml",
+ 8912: "wcbackup",
+ 8913: "dragonfly",
+ 8954: "cumulus-admin",
+ 8980: "nod-provider",
+ 8981: "nod-client",
+ 8989: "sunwebadmins",
+ 8990: "http-wmap",
+ 8991: "https-wmap",
+ 8999: "bctp",
+ 9000: "cslistener",
+ 9001: "etlservicemgr",
+ 9002: "dynamid",
+ 9007: "ogs-client",
+ 9009: "pichat",
+ 9020: "tambora",
+ 9021: "panagolin-ident",
+ 9022: "paragent",
+ 9023: "swa-1",
+ 9024: "swa-2",
+ 9025: "swa-3",
+ 9026: "swa-4",
+ 9060: "CardWeb-RT",
+ 9080: "glrpc",
+ 9084: "aurora",
+ 9085: "ibm-rsyscon",
+ 9086: "net2display",
+ 9087: "classic",
+ 9088: "sqlexec",
+ 9089: "sqlexec-ssl",
+ 9090: "websm",
+ 9091: "xmltec-xmlmail",
+ 9092: "XmlIpcRegSvc",
+ 9100: "hp-pdl-datastr",
+ 9101: "bacula-dir",
+ 9102: "bacula-fd",
+ 9103: "bacula-sd",
+ 9104: "peerwire",
+ 9105: "xadmin",
+ 9106: "astergate-disc",
+ 9119: "mxit",
+ 9131: "dddp",
+ 9160: "apani1",
+ 9161: "apani2",
+ 9162: "apani3",
+ 9163: "apani4",
+ 9164: "apani5",
+ 9191: "sun-as-jpda",
+ 9200: "wap-wsp",
+ 9201: "wap-wsp-wtp",
+ 9202: "wap-wsp-s",
+ 9203: "wap-wsp-wtp-s",
+ 9204: "wap-vcard",
+ 9205: "wap-vcal",
+ 9206: "wap-vcard-s",
+ 9207: "wap-vcal-s",
+ 9208: "rjcdb-vcards",
+ 9209: "almobile-system",
+ 9210: "oma-mlp",
+ 9211: "oma-mlp-s",
+ 9212: "serverviewdbms",
+ 9213: "serverstart",
+ 9214: "ipdcesgbs",
+ 9215: "insis",
+ 9216: "acme",
+ 9217: "fsc-port",
+ 9222: "teamcoherence",
+ 9255: "mon",
+ 9277: "traingpsdata",
+ 9278: "pegasus",
+ 9279: "pegasus-ctl",
+ 9280: "pgps",
+ 9281: "swtp-port1",
+ 9282: "swtp-port2",
+ 9283: "callwaveiam",
+ 9284: "visd",
+ 9285: "n2h2server",
+ 9286: "n2receive",
+ 9287: "cumulus",
+ 9292: "armtechdaemon",
+ 9293: "storview",
+ 9294: "armcenterhttp",
+ 9295: "armcenterhttps",
+ 9300: "vrace",
+ 9318: "secure-ts",
+ 9321: "guibase",
+ 9343: "mpidcmgr",
+ 9344: "mphlpdmc",
+ 9346: "ctechlicensing",
+ 9374: "fjdmimgr",
+ 9380: "boxp",
+ 9396: "fjinvmgr",
+ 9397: "mpidcagt",
+ 9400: "sec-t4net-srv",
+ 9401: "sec-t4net-clt",
+ 9402: "sec-pc2fax-srv",
+ 9418: "git",
+ 9443: "tungsten-https",
+ 9444: "wso2esb-console",
+ 9450: "sntlkeyssrvr",
+ 9500: "ismserver",
+ 9522: "sma-spw",
+ 9535: "mngsuite",
+ 9536: "laes-bf",
+ 9555: "trispen-sra",
+ 9592: "ldgateway",
+ 9593: "cba8",
+ 9594: "msgsys",
+ 9595: "pds",
+ 9596: "mercury-disc",
+ 9597: "pd-admin",
+ 9598: "vscp",
+ 9599: "robix",
+ 9600: "micromuse-ncpw",
+ 9612: "streamcomm-ds",
+ 9618: "condor",
+ 9628: "odbcpathway",
+ 9629: "uniport",
+ 9632: "mc-comm",
+ 9667: "xmms2",
+ 9668: "tec5-sdctp",
+ 9694: "client-wakeup",
+ 9695: "ccnx",
+ 9700: "board-roar",
+ 9747: "l5nas-parchan",
+ 9750: "board-voip",
+ 9753: "rasadv",
+ 9762: "tungsten-http",
+ 9800: "davsrc",
+ 9801: "sstp-2",
+ 9802: "davsrcs",
+ 9875: "sapv1",
+ 9878: "kca-service",
+ 9888: "cyborg-systems",
+ 9889: "gt-proxy",
+ 9898: "monkeycom",
+ 9899: "sctp-tunneling",
+ 9900: "iua",
+ 9901: "enrp",
+ 9903: "multicast-ping",
+ 9909: "domaintime",
+ 9911: "sype-transport",
+ 9950: "apc-9950",
+ 9951: "apc-9951",
+ 9952: "apc-9952",
+ 9953: "acis",
+ 9955: "alljoyn-mcm",
+ 9956: "alljoyn",
+ 9966: "odnsp",
+ 9987: "dsm-scm-target",
+ 9990: "osm-appsrvr",
+ 9991: "osm-oev",
+ 9992: "palace-1",
+ 9993: "palace-2",
+ 9994: "palace-3",
+ 9995: "palace-4",
+ 9996: "palace-5",
+ 9997: "palace-6",
+ 9998: "distinct32",
+ 9999: "distinct",
+ 10000: "ndmp",
+ 10001: "scp-config",
+ 10002: "documentum",
+ 10003: "documentum-s",
+ 10007: "mvs-capacity",
+ 10008: "octopus",
+ 10009: "swdtp-sv",
+ 10050: "zabbix-agent",
+ 10051: "zabbix-trapper",
+ 10080: "amanda",
+ 10081: "famdc",
+ 10100: "itap-ddtp",
+ 10101: "ezmeeting-2",
+ 10102: "ezproxy-2",
+ 10103: "ezrelay",
+ 10104: "swdtp",
+ 10107: "bctp-server",
+ 10110: "nmea-0183",
+ 10111: "nmea-onenet",
+ 10113: "netiq-endpoint",
+ 10114: "netiq-qcheck",
+ 10115: "netiq-endpt",
+ 10116: "netiq-voipa",
+ 10117: "iqrm",
+ 10128: "bmc-perf-sd",
+ 10160: "qb-db-server",
+ 10161: "snmpdtls",
+ 10162: "snmpdtls-trap",
+ 10200: "trisoap",
+ 10201: "rscs",
+ 10252: "apollo-relay",
+ 10253: "eapol-relay",
+ 10260: "axis-wimp-port",
+ 10288: "blocks",
+ 10439: "bngsync",
+ 10500: "hip-nat-t",
+ 10540: "MOS-lower",
+ 10541: "MOS-upper",
+ 10542: "MOS-aux",
+ 10543: "MOS-soap",
+ 10544: "MOS-soap-opt",
+ 10800: "gap",
+ 10805: "lpdg",
+ 10810: "nmc-disc",
+ 10860: "helix",
+ 10880: "bveapi",
+ 10990: "rmiaux",
+ 11000: "irisa",
+ 11001: "metasys",
+ 10023: "cefd-vmp",
+ 11095: "weave",
+ 11106: "sgi-lk",
+ 11108: "myq-termlink",
+ 11111: "vce",
+ 11112: "dicom",
+ 11161: "suncacao-snmp",
+ 11162: "suncacao-jmxmp",
+ 11163: "suncacao-rmi",
+ 11164: "suncacao-csa",
+ 11165: "suncacao-websvc",
+ 11171: "snss",
+ 11201: "smsqp",
+ 11208: "wifree",
+ 11211: "memcache",
+ 11319: "imip",
+ 11320: "imip-channels",
+ 11321: "arena-server",
+ 11367: "atm-uhas",
+ 11371: "hkp",
+ 11430: "lsdp",
+ 11600: "tempest-port",
+ 11720: "h323callsigalt",
+ 11723: "emc-xsw-dcache",
+ 11751: "intrepid-ssl",
+ 11796: "lanschool-mpt",
+ 11876: "xoraya",
+ 11877: "x2e-disc",
+ 11967: "sysinfo-sp",
+ 12000: "entextxid",
+ 12001: "entextnetwk",
+ 12002: "entexthigh",
+ 12003: "entextmed",
+ 12004: "entextlow",
+ 12005: "dbisamserver1",
+ 12006: "dbisamserver2",
+ 12007: "accuracer",
+ 12008: "accuracer-dbms",
+ 12009: "ghvpn",
+ 12012: "vipera",
+ 12013: "vipera-ssl",
+ 12109: "rets-ssl",
+ 12121: "nupaper-ss",
+ 12168: "cawas",
+ 12172: "hivep",
+ 12300: "linogridengine",
+ 12321: "warehouse-sss",
+ 12322: "warehouse",
+ 12345: "italk",
+ 12753: "tsaf",
+ 13160: "i-zipqd",
+ 13216: "bcslogc",
+ 13217: "rs-pias",
+ 13218: "emc-vcas-udp",
+ 13223: "powwow-client",
+ 13224: "powwow-server",
+ 13400: "doip-disc",
+ 13720: "bprd",
+ 13721: "bpdbm",
+ 13722: "bpjava-msvc",
+ 13724: "vnetd",
+ 13782: "bpcd",
+ 13783: "vopied",
+ 13785: "nbdb",
+ 13786: "nomdb",
+ 13818: "dsmcc-config",
+ 13819: "dsmcc-session",
+ 13820: "dsmcc-passthru",
+ 13821: "dsmcc-download",
+ 13822: "dsmcc-ccp",
+ 13894: "ucontrol",
+ 13929: "dta-systems",
+ 14000: "scotty-ft",
+ 14001: "sua",
+ 14002: "scotty-disc",
+ 14033: "sage-best-com1",
+ 14034: "sage-best-com2",
+ 14141: "vcs-app",
+ 14142: "icpp",
+ 14145: "gcm-app",
+ 14149: "vrts-tdd",
+ 14154: "vad",
+ 14250: "cps",
+ 14414: "ca-web-update",
+ 14936: "hde-lcesrvr-1",
+ 14937: "hde-lcesrvr-2",
+ 15000: "hydap",
+ 15118: "v2g-secc",
+ 15345: "xpilot",
+ 15363: "3link",
+ 15555: "cisco-snat",
+ 15660: "bex-xr",
+ 15740: "ptp",
+ 15998: "2ping",
+ 16003: "alfin",
+ 16161: "sun-sea-port",
+ 16309: "etb4j",
+ 16310: "pduncs",
+ 16311: "pdefmns",
+ 16360: "netserialext1",
+ 16361: "netserialext2",
+ 16367: "netserialext3",
+ 16368: "netserialext4",
+ 16384: "connected",
+ 16666: "vtp",
+ 16900: "newbay-snc-mc",
+ 16950: "sgcip",
+ 16991: "intel-rci-mp",
+ 16992: "amt-soap-http",
+ 16993: "amt-soap-https",
+ 16994: "amt-redir-tcp",
+ 16995: "amt-redir-tls",
+ 17007: "isode-dua",
+ 17185: "soundsvirtual",
+ 17219: "chipper",
+ 17220: "avtp",
+ 17221: "avdecc",
+ 17222: "cpsp",
+ 17224: "trdp-pd",
+ 17225: "trdp-md",
+ 17234: "integrius-stp",
+ 17235: "ssh-mgmt",
+ 17500: "db-lsp-disc",
+ 17729: "ea",
+ 17754: "zep",
+ 17755: "zigbee-ip",
+ 17756: "zigbee-ips",
+ 18000: "biimenu",
+ 18181: "opsec-cvp",
+ 18182: "opsec-ufp",
+ 18183: "opsec-sam",
+ 18184: "opsec-lea",
+ 18185: "opsec-omi",
+ 18186: "ohsc",
+ 18187: "opsec-ela",
+ 18241: "checkpoint-rtm",
+ 18262: "gv-pf",
+ 18463: "ac-cluster",
+ 18634: "rds-ib",
+ 18635: "rds-ip",
+ 18668: "vdmmesh-disc",
+ 18769: "ique",
+ 18881: "infotos",
+ 18888: "apc-necmp",
+ 19000: "igrid",
+ 19007: "scintilla",
+ 19191: "opsec-uaa",
+ 19194: "ua-secureagent",
+ 19220: "cora-disc",
+ 19283: "keysrvr",
+ 19315: "keyshadow",
+ 19398: "mtrgtrans",
+ 19410: "hp-sco",
+ 19411: "hp-sca",
+ 19412: "hp-sessmon",
+ 19539: "fxuptp",
+ 19540: "sxuptp",
+ 19541: "jcp",
+ 19788: "mle",
+ 19999: "dnp-sec",
+ 20000: "dnp",
+ 20001: "microsan",
+ 20002: "commtact-http",
+ 20003: "commtact-https",
+ 20005: "openwebnet",
+ 20012: "ss-idi-disc",
+ 20014: "opendeploy",
+ 20034: "nburn-id",
+ 20046: "tmophl7mts",
+ 20048: "mountd",
+ 20049: "nfsrdma",
+ 20167: "tolfab",
+ 20202: "ipdtp-port",
+ 20222: "ipulse-ics",
+ 20480: "emwavemsg",
+ 20670: "track",
+ 20999: "athand-mmp",
+ 21000: "irtrans",
+ 21554: "dfserver",
+ 21590: "vofr-gateway",
+ 21800: "tvpm",
+ 21845: "webphone",
+ 21846: "netspeak-is",
+ 21847: "netspeak-cs",
+ 21848: "netspeak-acd",
+ 21849: "netspeak-cps",
+ 22000: "snapenetio",
+ 22001: "optocontrol",
+ 22002: "optohost002",
+ 22003: "optohost003",
+ 22004: "optohost004",
+ 22005: "optohost004",
+ 22273: "wnn6",
+ 22305: "cis",
+ 22335: "shrewd-stream",
+ 22343: "cis-secure",
+ 22347: "wibukey",
+ 22350: "codemeter",
+ 22555: "vocaltec-phone",
+ 22763: "talikaserver",
+ 22800: "aws-brf",
+ 22951: "brf-gw",
+ 23000: "inovaport1",
+ 23001: "inovaport2",
+ 23002: "inovaport3",
+ 23003: "inovaport4",
+ 23004: "inovaport5",
+ 23005: "inovaport6",
+ 23272: "s102",
+ 23294: "5afe-disc",
+ 23333: "elxmgmt",
+ 23400: "novar-dbase",
+ 23401: "novar-alarm",
+ 23402: "novar-global",
+ 24000: "med-ltp",
+ 24001: "med-fsp-rx",
+ 24002: "med-fsp-tx",
+ 24003: "med-supp",
+ 24004: "med-ovw",
+ 24005: "med-ci",
+ 24006: "med-net-svc",
+ 24242: "filesphere",
+ 24249: "vista-4gl",
+ 24321: "ild",
+ 24322: "hid",
+ 24386: "intel-rci",
+ 24465: "tonidods",
+ 24554: "binkp",
+ 24577: "bilobit-update",
+ 24676: "canditv",
+ 24677: "flashfiler",
+ 24678: "proactivate",
+ 24680: "tcc-http",
+ 24850: "assoc-disc",
+ 24922: "find",
+ 25000: "icl-twobase1",
+ 25001: "icl-twobase2",
+ 25002: "icl-twobase3",
+ 25003: "icl-twobase4",
+ 25004: "icl-twobase5",
+ 25005: "icl-twobase6",
+ 25006: "icl-twobase7",
+ 25007: "icl-twobase8",
+ 25008: "icl-twobase9",
+ 25009: "icl-twobase10",
+ 25793: "vocaltec-hos",
+ 25900: "tasp-net",
+ 25901: "niobserver",
+ 25902: "nilinkanalyst",
+ 25903: "niprobe",
+ 25954: "bf-game",
+ 25955: "bf-master",
+ 26000: "quake",
+ 26133: "scscp",
+ 26208: "wnn6-ds",
+ 26260: "ezproxy",
+ 26261: "ezmeeting",
+ 26262: "k3software-svr",
+ 26263: "k3software-cli",
+ 26486: "exoline-udp",
+ 26487: "exoconfig",
+ 26489: "exonet",
+ 27345: "imagepump",
+ 27442: "jesmsjc",
+ 27504: "kopek-httphead",
+ 27782: "ars-vista",
+ 27999: "tw-auth-key",
+ 28000: "nxlmd",
+ 28119: "a27-ran-ran",
+ 28200: "voxelstorm",
+ 28240: "siemensgsm",
+ 29167: "otmp",
+ 30001: "pago-services1",
+ 30002: "pago-services2",
+ 30003: "amicon-fpsu-ra",
+ 30004: "amicon-fpsu-s",
+ 30260: "kingdomsonline",
+ 30832: "samsung-disc",
+ 30999: "ovobs",
+ 31016: "ka-kdp",
+ 31029: "yawn",
+ 31416: "xqosd",
+ 31457: "tetrinet",
+ 31620: "lm-mon",
+ 31765: "gamesmith-port",
+ 31948: "iceedcp-tx",
+ 31949: "iceedcp-rx",
+ 32034: "iracinghelper",
+ 32249: "t1distproc60",
+ 32483: "apm-link",
+ 32635: "sec-ntb-clnt",
+ 32636: "DMExpress",
+ 32767: "filenet-powsrm",
+ 32768: "filenet-tms",
+ 32769: "filenet-rpc",
+ 32770: "filenet-nch",
+ 32771: "filenet-rmi",
+ 32772: "filenet-pa",
+ 32773: "filenet-cm",
+ 32774: "filenet-re",
+ 32775: "filenet-pch",
+ 32776: "filenet-peior",
+ 32777: "filenet-obrok",
+ 32801: "mlsn",
+ 32896: "idmgratm",
+ 33123: "aurora-balaena",
+ 33331: "diamondport",
+ 33334: "speedtrace-disc",
+ 33434: "traceroute",
+ 33656: "snip-slave",
+ 34249: "turbonote-2",
+ 34378: "p-net-local",
+ 34379: "p-net-remote",
+ 34567: "edi_service",
+ 34962: "profinet-rt",
+ 34963: "profinet-rtm",
+ 34964: "profinet-cm",
+ 34980: "ethercat",
+ 35001: "rt-viewer",
+ 35004: "rt-classmanager",
+ 35100: "axio-disc",
+ 35355: "altova-lm-disc",
+ 36001: "allpeers",
+ 36411: "wlcp",
+ 36865: "kastenxpipe",
+ 37475: "neckar",
+ 37654: "unisys-eportal",
+ 38002: "crescoctrl-disc",
+ 38201: "galaxy7-data",
+ 38202: "fairview",
+ 38203: "agpolicy",
+ 39681: "turbonote-1",
+ 40000: "safetynetp",
+ 40023: "k-patentssensor",
+ 40841: "cscp",
+ 40842: "csccredir",
+ 40843: "csccfirewall",
+ 40853: "ortec-disc",
+ 41111: "fs-qos",
+ 41230: "z-wave-s",
+ 41794: "crestron-cip",
+ 41795: "crestron-ctp",
+ 42508: "candp",
+ 42509: "candrp",
+ 42510: "caerpc",
+ 43000: "recvr-rc-disc",
+ 43188: "reachout",
+ 43189: "ndm-agent-port",
+ 43190: "ip-provision",
+ 43210: "shaperai-disc",
+ 43439: "eq3-config",
+ 43440: "ew-disc-cmd",
+ 43441: "ciscocsdb",
+ 44321: "pmcd",
+ 44322: "pmcdproxy",
+ 44544: "domiq",
+ 44553: "rbr-debug",
+ 44600: "asihpi",
+ 44818: "EtherNet-IP-2",
+ 44900: "m3da-disc",
+ 45000: "asmp-mon",
+ 45054: "invision-ag",
+ 45514: "cloudcheck-ping",
+ 45678: "eba",
+ 45825: "qdb2service",
+ 45966: "ssr-servermgr",
+ 46999: "mediabox",
+ 47000: "mbus",
+ 47100: "jvl-mactalk",
+ 47557: "dbbrowse",
+ 47624: "directplaysrvr",
+ 47806: "ap",
+ 47808: "bacnet",
+ 47809: "presonus-ucnet",
+ 48000: "nimcontroller",
+ 48001: "nimspooler",
+ 48002: "nimhub",
+ 48003: "nimgtw",
+ 48128: "isnetserv",
+ 48129: "blp5",
+ 48556: "com-bardac-dw",
+ 48619: "iqobject",
+ 48653: "robotraconteur",
+ 49001: "nusdp-disc",
+}
+// sctpPortNames maps well-known SCTP port numbers to their registered
+// service names, used when pretty-printing SCTPPort values.
+var sctpPortNames = map[SCTPPort]string{
+ 9: "discard",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 80: "http",
+ 179: "bgp",
+ 443: "https",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1167: "cisco-ipsla",
+ 1720: "h323hostcall",
+ 2049: "nfs",
+ 2225: "rcip-itu",
+ 2904: "m2ua",
+ 2905: "m3ua",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 3097: "itu-bicc-stc",
+ 3565: "m2pa",
+ 3863: "asap-sctp",
+ 3864: "asap-sctp-tls",
+ 3868: "diameter",
+ 4333: "ahsp",
+ 4502: "a25-fap-fgw",
+ 4711: "trinity-dist",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 5060: "sip",
+ 5061: "sips",
+ 5090: "car",
+ 5091: "cxtp",
+ 5215: "noteza",
+ 5445: "smbdirect",
+ 5672: "amqp",
+ 5675: "v5ua",
+ 5868: "diameters",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 6704: "frc-hp",
+ 6705: "frc-mp",
+ 6706: "frc-lp",
+ 6970: "conductor-mpx",
+ 7626: "simco",
+ 7701: "nfapi",
+ 7728: "osvr",
+ 8471: "pim-port",
+ 9082: "lcs-ap",
+ 9084: "aurora",
+ 9900: "iua",
+ 9901: "enrp-sctp",
+ 9902: "enrp-sctp-tls",
+ 11997: "wmereceiving",
+ 11998: "wmedistribution",
+ 11999: "wmereporting",
+ 14001: "sua",
+ 20049: "nfsrdma",
+ 25471: "rna",
+ 29118: "sgsap",
+ 29168: "sbcap",
+ 29169: "iuhsctpassoc",
+ 30100: "rwp",
+ 36412: "s1-control",
+ 36422: "x2-control",
+ 36423: "slmap",
+ 36424: "nq-ap",
+ 36443: "m2ap",
+ 36444: "m3ap",
+ 36462: "xw-control",
+ 38412: "ng-control",
+ 38422: "xn-control",
+ 38472: "f1-control",
+}
diff --git a/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/google/gopacket/layers/icmp4.go
new file mode 100644
index 0000000..bd3f03f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp4.go
@@ -0,0 +1,267 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/gopacket"
+)
+
+// ICMPv4 message type values, as carried in the first byte of the
+// ICMPv4 header.
+const (
+ ICMPv4TypeEchoReply = 0
+ ICMPv4TypeDestinationUnreachable = 3
+ ICMPv4TypeSourceQuench = 4
+ ICMPv4TypeRedirect = 5
+ ICMPv4TypeEchoRequest = 8
+ ICMPv4TypeRouterAdvertisement = 9
+ ICMPv4TypeRouterSolicitation = 10
+ ICMPv4TypeTimeExceeded = 11
+ ICMPv4TypeParameterProblem = 12
+ ICMPv4TypeTimestampRequest = 13
+ ICMPv4TypeTimestampReply = 14
+ ICMPv4TypeInfoRequest = 15
+ ICMPv4TypeInfoReply = 16
+ ICMPv4TypeAddressMaskRequest = 17
+ ICMPv4TypeAddressMaskReply = 18
+)
+
+// ICMPv4 code values.  The meaning of the code byte depends on the message
+// type; the section comments below group the codes by the type they apply
+// to.  Note that some numeric values repeat across sections (e.g. code 0).
+const (
+ // DestinationUnreachable
+ ICMPv4CodeNet = 0
+ ICMPv4CodeHost = 1
+ ICMPv4CodeProtocol = 2
+ ICMPv4CodePort = 3
+ ICMPv4CodeFragmentationNeeded = 4
+ ICMPv4CodeSourceRoutingFailed = 5
+ ICMPv4CodeNetUnknown = 6
+ ICMPv4CodeHostUnknown = 7
+ ICMPv4CodeSourceIsolated = 8
+ ICMPv4CodeNetAdminProhibited = 9
+ ICMPv4CodeHostAdminProhibited = 10
+ ICMPv4CodeNetTOS = 11
+ ICMPv4CodeHostTOS = 12
+ ICMPv4CodeCommAdminProhibited = 13
+ ICMPv4CodeHostPrecedence = 14
+ ICMPv4CodePrecedenceCutoff = 15
+
+ // TimeExceeded
+ ICMPv4CodeTTLExceeded = 0
+ ICMPv4CodeFragmentReassemblyTimeExceeded = 1
+
+ // ParameterProblem
+ ICMPv4CodePointerIndicatesError = 0
+ ICMPv4CodeMissingOption = 1
+ ICMPv4CodeBadLength = 2
+
+ // Redirect
+ // ICMPv4CodeNet = same as for DestinationUnreachable
+ // ICMPv4CodeHost = same as for DestinationUnreachable
+ ICMPv4CodeTOSNet = 2
+ ICMPv4CodeTOSHost = 3
+)
+
+// icmpv4TypeCodeInfoStruct pairs a printable name for an ICMPv4 type with
+// an optional map from code value to printable code name.  codeStr is nil
+// for types that do not make use of the code field.
+// NOTE(review): codeStr is a *map even though Go maps are already reference
+// types; kept as-is to stay byte-identical with upstream gopacket.
+type icmpv4TypeCodeInfoStruct struct {
+ typeStr string
+ codeStr *map[uint8]string
+}
+
+// icmpv4TypeCodeInfo maps each known ICMPv4 type value to its printable
+// name and, where the type uses the code field, the printable names of its
+// codes.  Consulted by ICMPv4TypeCode.String.
+// NOTE(review): the repeated icmpv4TypeCodeInfoStruct{...} literal types are
+// redundant (gofmt -s would drop them); kept to match upstream gopacket.
+var (
+ icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{
+ ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{
+ "DestinationUnreachable", &map[uint8]string{
+ ICMPv4CodeNet: "Net",
+ ICMPv4CodeHost: "Host",
+ ICMPv4CodeProtocol: "Protocol",
+ ICMPv4CodePort: "Port",
+ ICMPv4CodeFragmentationNeeded: "FragmentationNeeded",
+ ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed",
+ ICMPv4CodeNetUnknown: "NetUnknown",
+ ICMPv4CodeHostUnknown: "HostUnknown",
+ ICMPv4CodeSourceIsolated: "SourceIsolated",
+ ICMPv4CodeNetAdminProhibited: "NetAdminProhibited",
+ ICMPv4CodeHostAdminProhibited: "HostAdminProhibited",
+ ICMPv4CodeNetTOS: "NetTOS",
+ ICMPv4CodeHostTOS: "HostTOS",
+ ICMPv4CodeCommAdminProhibited: "CommAdminProhibited",
+ ICMPv4CodeHostPrecedence: "HostPrecedence",
+ ICMPv4CodePrecedenceCutoff: "PrecedenceCutoff",
+ },
+ },
+ ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{
+ "TimeExceeded", &map[uint8]string{
+ ICMPv4CodeTTLExceeded: "TTLExceeded",
+ ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+ },
+ },
+ ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{
+ "ParameterProblem", &map[uint8]string{
+ ICMPv4CodePointerIndicatesError: "PointerIndicatesError",
+ ICMPv4CodeMissingOption: "MissingOption",
+ ICMPv4CodeBadLength: "BadLength",
+ },
+ },
+ ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{
+ "SourceQuench", nil,
+ },
+ ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{
+ "Redirect", &map[uint8]string{
+ ICMPv4CodeNet: "Net",
+ ICMPv4CodeHost: "Host",
+ ICMPv4CodeTOSNet: "TOS+Net",
+ ICMPv4CodeTOSHost: "TOS+Host",
+ },
+ },
+ ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{
+ "EchoRequest", nil,
+ },
+ ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{
+ "EchoReply", nil,
+ },
+ ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{
+ "TimestampRequest", nil,
+ },
+ ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{
+ "TimestampReply", nil,
+ },
+ ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{
+ "InfoRequest", nil,
+ },
+ ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{
+ "InfoReply", nil,
+ },
+ ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{
+ "RouterSolicitation", nil,
+ },
+ ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{
+ "RouterAdvertisement", nil,
+ },
+ ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{
+ "AddressMaskRequest", nil,
+ },
+ ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{
+ "AddressMaskReply", nil,
+ },
+ }
+)
+
+// ICMPv4TypeCode packs the ICMPv4 type (high byte) and code (low byte)
+// into a single 16-bit value, matching the big-endian wire layout.
+type ICMPv4TypeCode uint16
+
+// Type returns the ICMPv4 type field (the high byte of the combined value).
+func (a ICMPv4TypeCode) Type() uint8 {
+ return uint8(a >> 8)
+}
+
+// Code returns the ICMPv4 code field (the low byte of the combined value).
+func (a ICMPv4TypeCode) Code() uint8 {
+ return uint8(a)
+}
+
+// String returns a human-readable description of the combined type/code
+// value, e.g. "DestinationUnreachable(Port)".  Unknown types render as
+// "type(code)" numerically; known types with unknown or unexpected codes
+// render as "TypeName(Code: n)".
+func (a ICMPv4TypeCode) String() string {
+ t, c := a.Type(), a.Code()
+ strInfo, ok := icmpv4TypeCodeInfo[t]
+ if !ok {
+ // Unknown ICMPv4 type field
+ return fmt.Sprintf("%d(%d)", t, c)
+ }
+ typeStr := strInfo.typeStr
+ if strInfo.codeStr == nil && c == 0 {
+ // The ICMPv4 type does not make use of the code field; the name alone
+ // suffices (fixes the redundant fmt.Sprintf("%s", ...) — staticcheck S1025).
+ return typeStr
+ }
+ if strInfo.codeStr == nil && c != 0 {
+ // The ICMPv4 type does not make use of the code field, but it is present anyway
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ codeStr, ok := (*strInfo.codeStr)[c]
+ if !ok {
+ // We don't know this ICMPv4 code; print the numerical value
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+// GoString returns a Go-syntax-like representation such as
+// "layers.ICMPv4TypeCode(8, 0)", using reflection to obtain the type name.
+func (a ICMPv4TypeCode) GoString() string {
+ t := reflect.TypeOf(a)
+ return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer in
+// big-endian order (type byte first, then code byte).  The buffer must be
+// at least 2 bytes long or PutUint16 will panic.
+func (a ICMPv4TypeCode) SerializeTo(bytes []byte) {
+ binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode
+// gopacket type from the ICMPv4 type and code values.  The type occupies
+// the high byte and the code the low byte, matching the big-endian wire
+// layout used by SerializeTo.
+func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode {
+ // Shift/or produces the same value as the previous
+ // binary.BigEndian.Uint16([]byte{typ, code}) without the temporary slice.
+ return ICMPv4TypeCode(uint16(typ)<<8 | uint16(code))
+}
+
+// ICMPv4 is the layer for IPv4 ICMP packet data.
+// Field positions correspond to the fixed 8-byte ICMPv4 header as decoded
+// by DecodeFromBytes.
+type ICMPv4 struct {
+ BaseLayer
+ TypeCode ICMPv4TypeCode // type (high byte) and code (low byte): header bytes 0-1
+ Checksum uint16 // header checksum: header bytes 2-3
+ Id uint16 // identifier: header bytes 4-5
+ Seq uint16 // sequence number: header bytes 6-7
+}
+
+// LayerType returns LayerTypeICMPv4, identifying this layer to gopacket.
+func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+// The first 8 bytes are the fixed ICMPv4 header (type, code, checksum,
+// identifier, sequence number); any remaining bytes are recorded as the
+// layer payload.  Returns an error (and marks the packet truncated) when
+// fewer than 8 bytes are available.
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less than 8 bytes for ICMPv4 packet")
+ }
+ i.TypeCode = CreateICMPv4TypeCode(data[0], data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.Id = binary.BigEndian.Uint16(data[4:6])
+ i.Seq = binary.BigEndian.Uint16(data[6:8])
+ i.BaseLayer = BaseLayer{data[:8], data[8:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+// When opts.ComputeChecksums is set, the checksum is recomputed over the
+// header plus everything already in the buffer and stored in i.Checksum.
+func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ i.TypeCode.SerializeTo(bytes)
+ binary.BigEndian.PutUint16(bytes[4:], i.Id)
+ binary.BigEndian.PutUint16(bytes[6:], i.Seq)
+ if opts.ComputeChecksums {
+ // The checksum field must be zeroed before computing the checksum;
+ // b.Bytes() covers this 8-byte header plus any payload already present.
+ bytes[2] = 0
+ bytes[3] = 0
+ i.Checksum = tcpipChecksum(b.Bytes(), 0)
+ }
+ binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can
+// decode: just LayerTypeICMPv4.
+func (i *ICMPv4) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer:
+// an opaque payload, since ICMPv4 does not encapsulate another protocol.
+func (i *ICMPv4) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// decodeICMPv4 is the gopacket decoder hook for ICMPv4: it allocates a
+// fresh ICMPv4 layer and hands it to the generic decodingLayerDecoder.
+func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv4{}
+ return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/google/gopacket/layers/icmp6.go
new file mode 100644
index 0000000..09afd11
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6.go
@@ -0,0 +1,266 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/gopacket"
+)
+
// ICMPv6 message type values, grouped by the RFC that defines them.
const (
	// The following are from RFC 4443
	ICMPv6TypeDestinationUnreachable = 1
	ICMPv6TypePacketTooBig           = 2
	ICMPv6TypeTimeExceeded           = 3
	ICMPv6TypeParameterProblem       = 4
	ICMPv6TypeEchoRequest            = 128
	ICMPv6TypeEchoReply              = 129

	// The following are from RFC 4861 (Neighbor Discovery)
	ICMPv6TypeRouterSolicitation    = 133
	ICMPv6TypeRouterAdvertisement   = 134
	ICMPv6TypeNeighborSolicitation  = 135
	ICMPv6TypeNeighborAdvertisement = 136
	ICMPv6TypeRedirect              = 137

	// The following are from RFC 2710 (MLDv1)
	ICMPv6TypeMLDv1MulticastListenerQueryMessage  = 130
	ICMPv6TypeMLDv1MulticastListenerReportMessage = 131
	ICMPv6TypeMLDv1MulticastListenerDoneMessage   = 132

	// The following are from RFC 3810 (MLDv2)
	ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143
)
+
// ICMPv6 code values for the message types above that use the code field.
const (
	// DestinationUnreachable codes (RFC 4443)
	ICMPv6CodeNoRouteToDst           = 0
	ICMPv6CodeAdminProhibited        = 1
	ICMPv6CodeBeyondScopeOfSrc       = 2
	ICMPv6CodeAddressUnreachable     = 3
	ICMPv6CodePortUnreachable        = 4
	ICMPv6CodeSrcAddressFailedPolicy = 5
	ICMPv6CodeRejectRouteToDst       = 6

	// TimeExceeded codes
	ICMPv6CodeHopLimitExceeded               = 0
	ICMPv6CodeFragmentReassemblyTimeExceeded = 1

	// ParameterProblem codes
	ICMPv6CodeErroneousHeaderField   = 0
	ICMPv6CodeUnrecognizedNextHeader = 1
	ICMPv6CodeUnrecognizedIPv6Option = 2
)
+
// icmpv6TypeCodeInfoStruct pairs the printable name of an ICMPv6 type with an
// optional table of printable names for its codes; codeStr is nil when the
// type does not use the code field.
// NOTE(review): *map is unusual (maps are already reference types) — kept
// as-is because the icmpv6TypeCodeInfo table below depends on this layout.
type icmpv6TypeCodeInfoStruct struct {
	typeStr string
	codeStr *map[uint8]string
}
+
+var (
+ icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{
+ ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{
+ "DestinationUnreachable", &map[uint8]string{
+ ICMPv6CodeNoRouteToDst: "NoRouteToDst",
+ ICMPv6CodeAdminProhibited: "AdminProhibited",
+ ICMPv6CodeBeyondScopeOfSrc: "BeyondScopeOfSrc",
+ ICMPv6CodeAddressUnreachable: "AddressUnreachable",
+ ICMPv6CodePortUnreachable: "PortUnreachable",
+ ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy",
+ ICMPv6CodeRejectRouteToDst: "RejectRouteToDst",
+ },
+ },
+ ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{
+ "PacketTooBig", nil,
+ },
+ ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{
+ "TimeExceeded", &map[uint8]string{
+ ICMPv6CodeHopLimitExceeded: "HopLimitExceeded",
+ ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+ },
+ },
+ ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{
+ "ParameterProblem", &map[uint8]string{
+ ICMPv6CodeErroneousHeaderField: "ErroneousHeaderField",
+ ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader",
+ ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option",
+ },
+ },
+ ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{
+ "EchoRequest", nil,
+ },
+ ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{
+ "EchoReply", nil,
+ },
+ ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{
+ "RouterSolicitation", nil,
+ },
+ ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{
+ "RouterAdvertisement", nil,
+ },
+ ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{
+ "NeighborSolicitation", nil,
+ },
+ ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{
+ "NeighborAdvertisement", nil,
+ },
+ ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{
+ "Redirect", nil,
+ },
+ }
+)
+
// ICMPv6TypeCode packs an ICMPv6 message's type (high byte) and code (low
// byte) into a single 16-bit value.
type ICMPv6TypeCode uint16

// Type returns the ICMPv6 type field.
func (a ICMPv6TypeCode) Type() uint8 {
	return uint8((a & 0xff00) >> 8)
}

// Code returns the ICMPv6 code field.
func (a ICMPv6TypeCode) Code() uint8 {
	return uint8(a & 0x00ff)
}
+
+func (a ICMPv6TypeCode) String() string {
+ t, c := a.Type(), a.Code()
+ strInfo, ok := icmpv6TypeCodeInfo[t]
+ if !ok {
+ // Unknown ICMPv6 type field
+ return fmt.Sprintf("%d(%d)", t, c)
+ }
+ typeStr := strInfo.typeStr
+ if strInfo.codeStr == nil && c == 0 {
+ // The ICMPv6 type does not make use of the code field
+ return fmt.Sprintf("%s", strInfo.typeStr)
+ }
+ if strInfo.codeStr == nil && c != 0 {
+ // The ICMPv6 type does not make use of the code field, but it is present anyway
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ codeStr, ok := (*strInfo.codeStr)[c]
+ if !ok {
+ // We don't know this ICMPv6 code; print the numerical value
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv6TypeCode) GoString() string {
+ t := reflect.TypeOf(a)
+ return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer.
+func (a ICMPv6TypeCode) SerializeTo(bytes []byte) {
+ binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode
+// gopacket type from the ICMPv6 type and code values.
+func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode {
+ return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
// ICMPv6 is the layer for IPv6 ICMP packet data
type ICMPv6 struct {
	BaseLayer
	// TypeCode packs the message type (high byte) and code (low byte).
	TypeCode ICMPv6TypeCode
	// Checksum covers the ICMPv6 message plus the IPv6 pseudo-header
	// (computed via the embedded tcpipchecksum in SerializeTo).
	Checksum uint16
	// TypeBytes is deprecated and always nil. See the different ICMPv6 message types
	// instead (e.g. ICMPv6TypeRouterSolicitation).
	TypeBytes []byte
	tcpipchecksum
}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 4 bytes for ICMPv6 packet")
+ }
+ i.TypeCode = CreateICMPv6TypeCode(data[0], data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.BaseLayer = BaseLayer{data[:4], data[4:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ i.TypeCode.SerializeTo(bytes)
+
+ if opts.ComputeChecksums {
+ bytes[2] = 0
+ bytes[3] = 0
+ csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6)
+ if err != nil {
+ return err
+ }
+ i.Checksum = csum
+ }
+ binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6) NextLayerType() gopacket.LayerType {
+ switch i.TypeCode.Type() {
+ case ICMPv6TypeEchoRequest:
+ return LayerTypeICMPv6Echo
+ case ICMPv6TypeEchoReply:
+ return LayerTypeICMPv6Echo
+ case ICMPv6TypeRouterSolicitation:
+ return LayerTypeICMPv6RouterSolicitation
+ case ICMPv6TypeRouterAdvertisement:
+ return LayerTypeICMPv6RouterAdvertisement
+ case ICMPv6TypeNeighborSolicitation:
+ return LayerTypeICMPv6NeighborSolicitation
+ case ICMPv6TypeNeighborAdvertisement:
+ return LayerTypeICMPv6NeighborAdvertisement
+ case ICMPv6TypeRedirect:
+ return LayerTypeICMPv6Redirect
+ case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // Same Code for MLDv1 Query and MLDv2 Query
+ if len(i.Payload) > 20 { // Only payload size differs
+ return LayerTypeMLDv2MulticastListenerQuery
+ } else {
+ return LayerTypeMLDv1MulticastListenerQuery
+ }
+ case ICMPv6TypeMLDv1MulticastListenerDoneMessage:
+ return LayerTypeMLDv1MulticastListenerDone
+ case ICMPv6TypeMLDv1MulticastListenerReportMessage:
+ return LayerTypeMLDv1MulticastListenerReport
+ case ICMPv6TypeMLDv2MulticastListenerReportMessageV2:
+ return LayerTypeMLDv2MulticastListenerReport
+ }
+
+ return gopacket.LayerTypePayload
+}
+
+func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6{}
+ return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/google/gopacket/layers/icmp6msg.go
new file mode 100644
index 0000000..d9268db
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6msg.go
@@ -0,0 +1,578 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+// Based on RFC 4861
+
// ICMPv6Opt indicate how to decode the data associated with each ICMPv6Option.
type ICMPv6Opt uint8

// Option type values 1-5, matching the RFC 4861 Neighbor Discovery option
// type assignments (iota starts at 0; the blank identifier skips it).
const (
	_ ICMPv6Opt = iota

	// ICMPv6OptSourceAddress contains the link-layer address of the sender of
	// the packet. It is used in the Neighbor Solicitation, Router
	// Solicitation, and Router Advertisement packets. Must be ignored for other
	// Neighbor discovery messages.
	ICMPv6OptSourceAddress

	// ICMPv6OptTargetAddress contains the link-layer address of the target. It
	// is used in Neighbor Advertisement and Redirect packets. Must be ignored
	// for other Neighbor discovery messages.
	ICMPv6OptTargetAddress

	// ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes
	// for Address Autoconfiguration. The Prefix Information option appears in
	// Router Advertisement packets and MUST be silently ignored for other
	// messages.
	ICMPv6OptPrefixInfo

	// ICMPv6OptRedirectedHeader is used in Redirect messages and contains all
	// or part of the packet that is being redirected.
	ICMPv6OptRedirectedHeader

	// ICMPv6OptMTU is used in Router Advertisement messages to ensure that all
	// nodes on a link use the same MTU value in those cases where the link MTU
	// is not well known. This option MUST be silently ignored for other
	// Neighbor Discovery messages.
	ICMPv6OptMTU
)
+
// ICMPv6Echo represents the structure of a ping.
type ICMPv6Echo struct {
	BaseLayer
	// Identifier helps match echo replies to requests.
	Identifier uint16
	// SeqNumber helps order echo requests within one identifier.
	SeqNumber uint16
}

// ICMPv6RouterSolicitation is sent by hosts to find routers.
type ICMPv6RouterSolicitation struct {
	BaseLayer
	// Options holds the Neighbor Discovery options carried after the
	// 4 reserved header bytes.
	Options ICMPv6Options
}

// ICMPv6RouterAdvertisement is sent by routers in response to Solicitation.
type ICMPv6RouterAdvertisement struct {
	BaseLayer
	HopLimit uint8
	// Flags: bit 0x80 = Managed, bit 0x40 = Other; remaining bits reserved
	// (see ManagedAddressConfig / OtherConfig accessors).
	Flags          uint8
	RouterLifetime uint16
	ReachableTime  uint32
	RetransTimer   uint32
	Options        ICMPv6Options
}

// ICMPv6NeighborSolicitation is sent to request the link-layer address of a
// target node.
type ICMPv6NeighborSolicitation struct {
	BaseLayer
	// TargetAddress is the 16-byte IPv6 address being resolved.
	TargetAddress net.IP
	Options       ICMPv6Options
}

// ICMPv6NeighborAdvertisement is sent by nodes in response to Solicitation.
type ICMPv6NeighborAdvertisement struct {
	BaseLayer
	// Flags: 0x80 = Router, 0x40 = Solicited, 0x20 = Override
	// (see the Router / Solicited / Override accessors).
	Flags         uint8
	TargetAddress net.IP
	Options       ICMPv6Options
}

// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node
// on the path to a destination.
type ICMPv6Redirect struct {
	BaseLayer
	TargetAddress      net.IP
	DestinationAddress net.IP
	Options            ICMPv6Options
}

// ICMPv6Option contains the type and data for a single option.
type ICMPv6Option struct {
	Type ICMPv6Opt
	// Data excludes the leading type and length bytes of the wire format.
	Data []byte
}

// ICMPv6Options is a slice of ICMPv6Option.
type ICMPv6Options []ICMPv6Option
+
+func (i ICMPv6Opt) String() string {
+ switch i {
+ case ICMPv6OptSourceAddress:
+ return "SourceAddress"
+ case ICMPv6OptTargetAddress:
+ return "TargetAddress"
+ case ICMPv6OptPrefixInfo:
+ return "PrefixInfo"
+ case ICMPv6OptRedirectedHeader:
+ return "RedirectedHeader"
+ case ICMPv6OptMTU:
+ return "MTU"
+ default:
+ return fmt.Sprintf("Unknown(%d)", i)
+ }
+}
+
// CanDecode returns the set of layer types that this DecodingLayer can decode.
func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass {
	return LayerTypeICMPv6Echo
}

// LayerType returns LayerTypeICMPv6Echo.
func (i *ICMPv6Echo) LayerType() gopacket.LayerType {
	return LayerTypeICMPv6Echo
}

// NextLayerType returns the layer type contained by this DecodingLayer.
// Echo data is opaque bytes, so anything after the header is plain payload.
func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType {
	return gopacket.LayerTypePayload
}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 4 bytes for ICMPv6 Echo")
+ }
+ i.Identifier = binary.BigEndian.Uint16(data[0:2])
+ i.SeqNumber = binary.BigEndian.Uint16(data[2:4])
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+
+ binary.BigEndian.PutUint16(buf, i.Identifier)
+ binary.BigEndian.PutUint16(buf[2:], i.SeqNumber)
+ return nil
+}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6RouterSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // first 4 bytes are reserved followed by options
+ if len(data) < 4 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 4 bytes for ICMPv6 router solicitation")
+ }
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[4:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6RouterSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6RouterAdvertisement.
+func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6RouterAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 12 bytes for ICMPv6 router advertisement")
+ }
+
+ i.HopLimit = uint8(data[0])
+ // M, O bit followed by 6 reserved bits
+ i.Flags = uint8(data[1])
+ i.RouterLifetime = binary.BigEndian.Uint16(data[2:4])
+ i.ReachableTime = binary.BigEndian.Uint32(data[4:8])
+ i.RetransTimer = binary.BigEndian.Uint32(data[8:12])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[12:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(12)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(i.HopLimit)
+ buf[1] = byte(i.Flags)
+ binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime)
+ binary.BigEndian.PutUint32(buf[4:], i.ReachableTime)
+ binary.BigEndian.PutUint32(buf[8:], i.RetransTimer)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6RouterAdvertisement
+}
+
+// ManagedAddressConfig is true when addresses are available via DHCPv6. If
+// set, the OtherConfig flag is redundant.
+func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool {
+ return i.Flags&0x80 != 0
+}
+
+// OtherConfig is true when there is other configuration information available
+// via DHCPv6. For example, DNS-related information.
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool {
+ return i.Flags&0x40 != 0
+}
+
+// LayerType returns LayerTypeICMPv6NeighborSolicitation.
+func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6NeighborSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor solicitation")
+ }
+
+ i.TargetAddress = net.IP(data[4:20])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ copy(buf[4:], i.TargetAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6NeighborSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6NeighborAdvertisement.
+func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor advertisement")
+ }
+
+ i.Flags = uint8(data[0])
+ i.TargetAddress = net.IP(data[4:20])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(i.Flags)
+ copy(buf[1:], lotsOfZeros[:3])
+ copy(buf[4:], i.TargetAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// Router indicates whether the sender is a router or not.
+func (i *ICMPv6NeighborAdvertisement) Router() bool {
+ return i.Flags&0x80 != 0
+}
+
+// Solicited indicates whether the advertisement was solicited or not.
+func (i *ICMPv6NeighborAdvertisement) Solicited() bool {
+ return i.Flags&0x40 != 0
+}
+
+// Override indicates whether the advertisement should Override an existing
+// cache entry.
+func (i *ICMPv6NeighborAdvertisement) Override() bool {
+ return i.Flags&0x20 != 0
+}
+
+// LayerType returns LayerTypeICMPv6Redirect.
+func (i *ICMPv6Redirect) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6Redirect
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 36 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 36 bytes for ICMPv6 redirect")
+ }
+
+ i.TargetAddress = net.IP(data[4:20])
+ i.DestinationAddress = net.IP(data[20:36])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[36:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(36)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ copy(buf[4:], i.TargetAddress)
+ copy(buf[20:], i.DestinationAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6Redirect
+}
+
// String renders an option in a human-readable form, decoding the known
// option payloads (link-layer addresses, prefix info, MTU); unknown or
// malformed options fall back to a hex dump of Data.
func (i ICMPv6Option) String() string {
	hd := hex.EncodeToString(i.Data)
	if len(hd) > 0 {
		hd = " 0x" + hd
	}

	switch i.Type {
	case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress:
		return fmt.Sprintf("ICMPv6Option(%s:%v)",
			i.Type,
			net.HardwareAddr(i.Data))
	case ICMPv6OptPrefixInfo:
		// 30 = RFC 4861 prefix-info option length (32) minus the type and
		// length bytes already stripped from Data.
		if len(i.Data) == 30 {
			prefixLen := uint8(i.Data[0])
			onLink := (i.Data[1]&0x80 != 0)
			autonomous := (i.Data[1]&0x40 != 0)
			validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second
			preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second

			// Offset 14 within Data skips the 4-byte reserved field after
			// the lifetimes, landing on the 16-byte prefix.
			prefix := net.IP(i.Data[14:])

			return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)",
				i.Type,
				prefix, prefixLen,
				onLink, autonomous,
				validLifetime, preferredLifetime)
		}
	case ICMPv6OptRedirectedHeader:
		// could invoke IP decoder on data... probably best not to
		break
	case ICMPv6OptMTU:
		// 6 = 2 reserved bytes + 4-byte MTU value.
		if len(i.Data) == 6 {
			return fmt.Sprintf("ICMPv6Option(%s:%v)",
				i.Type,
				binary.BigEndian.Uint32(i.Data[2:]))
		}

	}
	return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd)
}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ for len(data) > 0 {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 2 bytes for ICMPv6 message option")
+ }
+
+ // unit is 8 octets, convert to bytes
+ length := int(data[1]) * 8
+
+ if length == 0 {
+ df.SetTruncated()
+ return errors.New("ICMPv6 message option with length 0")
+ }
+
+ if len(data) < length {
+ df.SetTruncated()
+ return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length)
+ }
+
+ o := ICMPv6Option{
+ Type: ICMPv6Opt(data[0]),
+ Data: data[2:length],
+ }
+
+ // chop off option we just consumed
+ data = data[length:]
+
+ *i = append(*i, o)
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ for _, opt := range []ICMPv6Option(*i) {
+ length := len(opt.Data) + 2
+ buf, err := b.PrependBytes(length)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(opt.Type)
+ buf[1] = byte(length / 8)
+ copy(buf[2:], opt.Data)
+ }
+
+ return nil
+}
+
+func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6Echo{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6RouterSolicitation{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6RouterAdvertisement{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6NeighborSolicitation{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6NeighborAdvertisement{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6Redirect{}
+ return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/google/gopacket/layers/igmp.go
new file mode 100644
index 0000000..d008415
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/igmp.go
@@ -0,0 +1,355 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
// IGMPType is the 8-bit message type of an IGMP packet.
type IGMPType uint8

// IGMP message type values (first octet of the message).
const (
	IGMPMembershipQuery    IGMPType = 0x11 // General or group specific query
	IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report
	IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report
	IGMPLeaveGroup         IGMPType = 0x17 // Leave Group
	IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report
)

// String returns a human-readable name for the message type, or the empty
// string for unknown values.
func (i IGMPType) String() string {
	switch i {
	case IGMPMembershipReportV1:
		return "IGMPv1 Membership Report"
	case IGMPMembershipReportV2:
		return "IGMPv2 Membership Report"
	case IGMPMembershipReportV3:
		return "IGMPv3 Membership Report"
	case IGMPMembershipQuery:
		return "IGMP Membership Query"
	case IGMPLeaveGroup:
		return "Leave Group"
	}
	return ""
}
+
// IGMPv3GroupRecordType is the record type of an IGMPv3 group record.
type IGMPv3GroupRecordType uint8

// IGMPv3 group record type values.
const (
	IGMPIsIn  IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x
	IGMPIsEx  IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x
	IGMPToIn  IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x
	IGMPToEx  IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x
	IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x
	IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x
)

// String returns the RFC name of the record type, or the empty string for
// unknown values.
func (i IGMPv3GroupRecordType) String() string {
	switch i {
	case IGMPBlock:
		return "BLOCK_OLD_SOURCES"
	case IGMPAllow:
		return "ALLOW_NEW_SOURCES"
	case IGMPToEx:
		return "CHANGE_TO_EXCLUDE_MODE"
	case IGMPToIn:
		return "CHANGE_TO_INCLUDE_MODE"
	case IGMPIsEx:
		return "MODE_IS_EXCLUDE"
	case IGMPIsIn:
		return "MODE_IS_INCLUDE"
	}
	return ""
}
+
// IGMP represents an IGMPv3 message.
type IGMP struct {
	BaseLayer
	Type            IGMPType
	MaxResponseTime time.Duration
	Checksum        uint16
	GroupAddress    net.IP
	// SupressRouterProcessing mirrors the query's "S" flag (bit 0x8 of byte 8).
	// NOTE(review): name misspells "Suppress", but the field is exported and
	// cannot be renamed without breaking the public API.
	SupressRouterProcessing bool
	RobustnessValue         uint8
	IntervalTime            time.Duration
	SourceAddresses         []net.IP
	NumberOfGroupRecords    uint16
	NumberOfSources         uint16
	GroupRecords            []IGMPv3GroupRecord
	Version                 uint8 // IGMP protocol version
}
+
// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet.
//
//  0                   1                   2                   3
//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |      Type     | Max Resp Time |           Checksum            |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |                         Group Address                         |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
type IGMPv1or2 struct {
	BaseLayer
	Type            IGMPType      // IGMP message type
	MaxResponseTime time.Duration // meaningful only in Membership Query messages
	Checksum        uint16        // 16-bit checksum of entire ip payload
	GroupAddress    net.IP        // either 0 or an IP multicast address
	Version         uint8         // 1 or 2, set by the caller that decodes the packet
}
+
+// decodeResponse dissects IGMPv1 or IGMPv2 packet.
+func (i *IGMPv1or2) decodeResponse(data []byte) error {
+ if len(data) < 8 {
+ return errors.New("IGMP packet too small")
+ }
+
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.GroupAddress = net.IP(data[4:8])
+
+ return nil
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Type = 0x22 | Reserved | Checksum |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Reserved | Number of Group Records (M) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [1] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [2] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [M] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Record Type | Aux Data Len | Number of Sources (N) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Multicast Address |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Source Address [1] |
+// +- -+
+// | Source Address [2] |
+// +- -+
+// | Source Address [N] |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Auxiliary Data .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message.
type IGMPv3GroupRecord struct {
	Type       IGMPv3GroupRecordType
	AuxDataLen uint8 // this should always be 0 as per IGMPv3 spec.
	// NumberOfSources is the count of SourceAddresses that follow on the wire.
	NumberOfSources  uint16
	MulticastAddress net.IP
	SourceAddresses  []net.IP
	AuxData          uint32 // NOT USED
}
+
// decodeIGMPv3MembershipReport parses an IGMPv3 Membership Report (type 0x22):
// an 8-byte header (checksum at bytes 2-3, record count at bytes 6-8),
// followed by NumberOfGroupRecords variable-length group records.
func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error {
	if len(data) < 8 {
		return errors.New("IGMPv3 Membership Report too small #1")
	}

	i.Checksum = binary.BigEndian.Uint16(data[2:4])
	i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8])

	// recordOffset walks the records; each record is an 8-byte fixed part
	// plus 4 bytes per source address.
	recordOffset := 8
	for j := 0; j < int(i.NumberOfGroupRecords); j++ {
		if len(data) < recordOffset+8 {
			return errors.New("IGMPv3 Membership Report too small #2")
		}

		var gr IGMPv3GroupRecord
		gr.Type = IGMPv3GroupRecordType(data[recordOffset])
		gr.AuxDataLen = data[recordOffset+1]
		gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4])
		gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8])

		// Bounds-check all source addresses for this record before reading.
		if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 {
			return errors.New("IGMPv3 Membership Report too small #3")
		}

		// append source address records.
		// NOTE(review): the inner loop variable shadows the receiver name i;
		// harmless here since the receiver is not used inside the loop.
		for i := 0; i < int(gr.NumberOfSources); i++ {
			sourceAddr := net.IP(data[recordOffset+8+i*4 : recordOffset+12+i*4])
			gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr)
		}

		i.GroupRecords = append(i.GroupRecords, gr)
		recordOffset += 8 + 4*int(gr.NumberOfSources)
	}
	return nil
}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Type = 0x11 | Max Resp Code | Checksum |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Group Address |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Resv |S| QRV | QQIC | Number of Sources (N) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Source Address [1] |
+// +- -+
+// | Source Address [2] |
+// +- . -+
+// | Source Address [N] |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11
+func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error {
+ if len(data) < 12 {
+ return errors.New("IGMPv3 Membership Query too small #1")
+ }
+
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.SupressRouterProcessing = data[8]&0x8 != 0
+ i.GroupAddress = net.IP(data[4:8])
+ i.RobustnessValue = data[8] & 0x7
+ i.IntervalTime = igmpTimeDecode(data[9])
+ i.NumberOfSources = binary.BigEndian.Uint16(data[10:12])
+
+ if len(data) < 12+int(i.NumberOfSources)*4 {
+ return errors.New("IGMPv3 Membership Query too small #2")
+ }
+
+ for j := 0; j < int(i.NumberOfSources); j++ {
+ i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4]))
+ }
+
+ return nil
+}
+
+// igmpTimeDecode decodes the duration created by the given byte, using the
+// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1.
+func igmpTimeDecode(t uint8) time.Duration {
+ if t&0x80 == 0 {
+ return time.Millisecond * 100 * time.Duration(t)
+ }
+ mant := (t & 0x70) >> 4
+ exp := t & 0x0F
+ return time.Millisecond * 100 * time.Duration((mant|0x10)<<(exp+3))
+}
+
// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats.
func (i *IGMP) LayerType() gopacket.LayerType { return LayerTypeIGMP }

// LayerType returns LayerTypeIGMP; v1/v2 messages share the same layer type.
func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP }
+
+func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ return errors.New("IGMP Packet too small")
+ }
+
+ i.Type = IGMPType(data[0])
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.GroupAddress = net.IP(data[4:8])
+
+ return nil
+}
+
// NextLayerType returns LayerTypeZero: IGMPv1/v2 messages carry no payload.
func (i *IGMPv1or2) NextLayerType() gopacket.LayerType {
	return gopacket.LayerTypeZero
}

// CanDecode returns the layer class this DecodingLayer can decode.
func (i *IGMPv1or2) CanDecode() gopacket.LayerClass {
	return LayerTypeIGMP
}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 1 {
+ return errors.New("IGMP packet is too small")
+ }
+
+ // common IGMP header values between versions 1..3 of IGMP specification..
+ i.Type = IGMPType(data[0])
+
+ switch i.Type {
+ case IGMPMembershipQuery:
+ i.decodeIGMPv3MembershipQuery(data)
+ case IGMPMembershipReportV3:
+ i.decodeIGMPv3MembershipReport(data)
+ default:
+ return errors.New("unsupported IGMP type")
+ }
+
+ return nil
+}
+
// CanDecode returns the set of layer types that this DecodingLayer can decode.
func (i *IGMP) CanDecode() gopacket.LayerClass {
	return LayerTypeIGMP
}

// NextLayerType returns the layer type contained by this DecodingLayer.
// IGMP messages carry no payload, hence LayerTypeZero.
func (i *IGMP) NextLayerType() gopacket.LayerType {
	return gopacket.LayerTypeZero
}
+
+// decodeIGMP will parse IGMP v1,2 or 3 protocols. Checks against the
+// IGMP type are performed against byte[0], logic then iniitalizes and
+// passes the appropriate struct (IGMP or IGMPv1or2) to
+// decodingLayerDecoder.
+func decodeIGMP(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 1 {
+ return errors.New("IGMP packet is too small")
+ }
+
+ // byte 0 contains IGMP message type.
+ switch IGMPType(data[0]) {
+ case IGMPMembershipQuery:
+ // IGMPv3 Membership Query payload is >= 12
+ if len(data) >= 12 {
+ i := &IGMP{Version: 3}
+ return decodingLayerDecoder(i, data, p)
+ } else if len(data) == 8 {
+ i := &IGMPv1or2{}
+ if data[1] == 0x00 {
+ i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0
+ } else {
+ i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0
+ }
+
+ return decodingLayerDecoder(i, data, p)
+ }
+ case IGMPMembershipReportV3:
+ i := &IGMP{Version: 3}
+ return decodingLayerDecoder(i, data, p)
+ case IGMPMembershipReportV1:
+ i := &IGMPv1or2{Version: 1}
+ return decodingLayerDecoder(i, data, p)
+ case IGMPLeaveGroup, IGMPMembershipReportV2:
+ // leave group and Query Report v2 used in IGMPv2 only.
+ i := &IGMPv1or2{Version: 2}
+ return decodingLayerDecoder(i, data, p)
+ default:
+ }
+
+ return errors.New("Unable to determine IGMP type.")
+}
diff --git a/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/google/gopacket/layers/ip4.go
new file mode 100644
index 0000000..2b3c0c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip4.go
@@ -0,0 +1,325 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
// IPv4Flag holds the three flag bits from the IPv4 header.
type IPv4Flag uint8

const (
	IPv4EvilBit       IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;)
	IPv4DontFragment  IPv4Flag = 1 << 1
	IPv4MoreFragments IPv4Flag = 1 << 0
)

// String renders the set flags as a "|"-separated list, e.g. "DF|MF".
func (f IPv4Flag) String() string {
	names := make([]string, 0, 3)
	for _, entry := range []struct {
		bit  IPv4Flag
		name string
	}{
		{IPv4EvilBit, "Evil"},
		{IPv4DontFragment, "DF"},
		{IPv4MoreFragments, "MF"},
	} {
		if f&entry.bit != 0 {
			names = append(names, entry.name)
		}
	}
	return strings.Join(names, "|")
}
+
// IPv4 is the header of an IP packet.
// Field meanings follow the wire layout parsed in DecodeFromBytes.
type IPv4 struct {
	BaseLayer
	Version    uint8  // header version, high nibble of byte 0
	IHL        uint8  // header length in 32-bit words, low nibble of byte 0
	TOS        uint8
	Length     uint16 // total length (header + payload) in bytes
	Id         uint16
	Flags      IPv4Flag // top 3 bits of the flags/fragment-offset word
	FragOffset uint16   // lower 13 bits of the same word
	TTL        uint8
	Protocol   IPProtocol
	Checksum   uint16
	SrcIP      net.IP
	DstIP      net.IP
	Options    []IPv4Option
	Padding    []byte // bytes after an end-of-options option, if any
}

// LayerType returns LayerTypeIPv4
func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 }

// NetworkFlow returns a flow keyed on the source and destination addresses.
func (i *IPv4) NetworkFlow() gopacket.Flow {
	return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP)
}
+
// IPv4Option is a single type-length-value option from the IPv4 header.
// OptionLength includes the 2 type/length bytes (option types 0 and 1 are
// single-byte and carry no length field).
type IPv4Option struct {
	OptionType   uint8
	OptionLength uint8
	OptionData   []byte
}

// String renders the option as "IPv4Option(type:data)".
func (i IPv4Option) String() string {
	return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData)
}
+
+// for the current ipv4 options, return the number of bytes (including
+// padding that the options used)
+func (ip *IPv4) getIPv4OptionSize() uint8 {
+ optionSize := uint8(0)
+ for _, opt := range ip.Options {
+ switch opt.OptionType {
+ case 0:
+ // this is the end of option lists
+ optionSize++
+ case 1:
+ // this is the padding
+ optionSize++
+ default:
+ optionSize += opt.OptionLength
+
+ }
+ }
+ // make sure the options are aligned to 32 bit boundary
+ if (optionSize % 4) != 0 {
+ optionSize += 4 - (optionSize % 4)
+ }
+ return optionSize
+}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// With opts.FixLengths, IHL and Length are recomputed; with
// opts.ComputeChecksums, the header checksum is recomputed last, over the
// finished header bytes.
func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	optionLength := ip.getIPv4OptionSize()
	bytes, err := b.PrependBytes(20 + int(optionLength))
	if err != nil {
		return err
	}
	if opts.FixLengths {
		// IHL counts 32-bit words; Length covers header plus existing payload.
		ip.IHL = 5 + (optionLength / 4)
		ip.Length = uint16(len(b.Bytes()))
	}
	bytes[0] = (ip.Version << 4) | ip.IHL
	bytes[1] = ip.TOS
	binary.BigEndian.PutUint16(bytes[2:], ip.Length)
	binary.BigEndian.PutUint16(bytes[4:], ip.Id)
	binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags())
	bytes[8] = ip.TTL
	bytes[9] = byte(ip.Protocol)
	// Normalize SrcIP/DstIP to 4-byte form before copying into the header.
	if err := ip.AddressTo4(); err != nil {
		return err
	}
	copy(bytes[12:16], ip.SrcIP)
	copy(bytes[16:20], ip.DstIP)

	curLocation := 20
	// Now, we will encode the options
	for _, opt := range ip.Options {
		switch opt.OptionType {
		case 0:
			// this is the end of option lists
			bytes[curLocation] = 0
			curLocation++
		case 1:
			// this is the padding
			bytes[curLocation] = 1
			curLocation++
		default:
			bytes[curLocation] = opt.OptionType
			bytes[curLocation+1] = opt.OptionLength

			// sanity checking to protect us from buffer overrun
			if len(opt.OptionData) > int(opt.OptionLength-2) {
				return errors.New("option length is smaller than length of option data")
			}
			copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData)
			curLocation += int(opt.OptionLength)
		}
	}

	if opts.ComputeChecksums {
		ip.Checksum = checksum(bytes)
	}
	binary.BigEndian.PutUint16(bytes[10:], ip.Checksum)
	return nil
}
+
// checksum computes the RFC 1071 Internet checksum over an IPv4 header.
// The checksum field (offsets 10-11) is zeroed in place before summing, as
// required when (re)computing a header checksum, so the input is mutated.
//
// Bytes are summed as big-endian 16-bit words; an odd trailing byte is padded
// with a zero low byte, so odd-length input is handled safely (the previous
// implementation indexed one past the end of an odd-length slice).
func checksum(bytes []byte) uint16 {
	// Clear checksum bytes so a stale value doesn't contribute to the sum.
	bytes[10] = 0
	bytes[11] = 0

	var csum uint32
	n := len(bytes)
	for i := 0; i+1 < n; i += 2 {
		csum += uint32(bytes[i])<<8 | uint32(bytes[i+1])
	}
	if n%2 == 1 {
		// Odd trailing byte: treat as the high byte of a final word.
		csum += uint32(bytes[n-1]) << 8
	}
	// Fold the carries back into the low 16 bits until none remain.
	for csum > 0xFFFF {
		csum = (csum >> 16) + (csum & 0xFFFF)
	}
	// Flip all the bits.
	return ^uint16(csum)
}
+
// flagsfrags packs Flags (top 3 bits) and FragOffset (low 13 bits) into the
// 16-bit flags/fragment-offset word of the header.
func (ip *IPv4) flagsfrags() (ff uint16) {
	ff |= uint16(ip.Flags) << 13
	ff |= ip.FragOffset
	return
}
+
// DecodeFromBytes decodes the given bytes into this layer: fixed 20-byte
// header, then header options up to IHL*4 bytes, with the payload following.
// Consistency between Length, IHL and the available data is validated, and
// truncation is reported via df.SetTruncated().
func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 20 {
		df.SetTruncated()
		return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data))
	}
	flagsfrags := binary.BigEndian.Uint16(data[6:8])

	ip.Version = uint8(data[0]) >> 4
	ip.IHL = uint8(data[0]) & 0x0F
	ip.TOS = data[1]
	ip.Length = binary.BigEndian.Uint16(data[2:4])
	ip.Id = binary.BigEndian.Uint16(data[4:6])
	ip.Flags = IPv4Flag(flagsfrags >> 13)
	ip.FragOffset = flagsfrags & 0x1FFF
	ip.TTL = data[8]
	ip.Protocol = IPProtocol(data[9])
	ip.Checksum = binary.BigEndian.Uint16(data[10:12])
	ip.SrcIP = data[12:16]
	ip.DstIP = data[16:20]
	// Reset per-decode state so a reused layer doesn't keep stale options.
	ip.Options = ip.Options[:0]
	ip.Padding = nil
	// Set up an initial guess for contents/payload... we'll reset these soon.
	ip.BaseLayer = BaseLayer{Contents: data}

	// This code is added for the following enviroment:
	// * Windows 10 with TSO option activated. ( tested on Hyper-V, RealTek ethernet driver )
	if ip.Length == 0 {
		// If using TSO(TCP Segmentation Offload), length is zero.
		// The actual packet length is the length of data.
		ip.Length = uint16(len(data))
	}

	if ip.Length < 20 {
		return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length)
	} else if ip.IHL < 5 {
		return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL)
	} else if int(ip.IHL*4) > int(ip.Length) {
		return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length)
	}
	// Trim trailing bytes beyond the declared length, or flag truncation when
	// fewer bytes than declared are available.
	if cmp := len(data) - int(ip.Length); cmp > 0 {
		data = data[:ip.Length]
	} else if cmp < 0 {
		df.SetTruncated()
		if int(ip.IHL)*4 > len(data) {
			return errors.New("Not all IP header bytes available")
		}
	}
	ip.Contents = data[:ip.IHL*4]
	ip.Payload = data[ip.IHL*4:]
	// From here on, data contains the header options.
	data = data[20 : ip.IHL*4]
	// Pull out IP options
	for len(data) > 0 {
		if ip.Options == nil {
			// Pre-allocate to avoid growing the slice too much.
			ip.Options = make([]IPv4Option, 0, 4)
		}
		opt := IPv4Option{OptionType: data[0]}
		switch opt.OptionType {
		case 0: // End of options
			opt.OptionLength = 1
			ip.Options = append(ip.Options, opt)
			ip.Padding = data[1:]
			return nil
		case 1: // 1 byte padding
			opt.OptionLength = 1
			data = data[1:]
			ip.Options = append(ip.Options, opt)
		default:
			if len(data) < 2 {
				df.SetTruncated()
				return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data))
			}
			opt.OptionLength = data[1]
			if len(data) < int(opt.OptionLength) {
				df.SetTruncated()
				return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength)
			}
			if opt.OptionLength <= 2 {
				return fmt.Errorf("Invalid IP option type %v length %d. Must be greater than 2", opt.OptionType, opt.OptionLength)
			}
			opt.OptionData = data[2:opt.OptionLength]
			data = data[opt.OptionLength:]
			ip.Options = append(ip.Options, opt)
		}
	}
	return nil
}
+
// CanDecode returns the layer class this DecodingLayer can decode.
func (i *IPv4) CanDecode() gopacket.LayerClass {
	return LayerTypeIPv4
}

// NextLayerType returns the decoder for the payload: the fragment
// reassembly layer for fragmented packets, otherwise the layer for Protocol.
func (i *IPv4) NextLayerType() gopacket.LayerType {
	if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 {
		return gopacket.LayerTypeFragment
	}
	return i.Protocol.LayerType()
}
+
// decodeIPv4 decodes data as an IPv4 packet and registers it as the network
// layer. The layer is added even when decoding fails, so a partially decoded
// header remains available to the packet.
func decodeIPv4(data []byte, p gopacket.PacketBuilder) error {
	ip := &IPv4{}
	err := ip.DecodeFromBytes(data, p)
	p.AddLayer(ip)
	p.SetNetworkLayer(ip)
	if err != nil {
		return err
	}
	return p.NextDecoder(ip.NextLayerType())
}
+
+func checkIPv4Address(addr net.IP) (net.IP, error) {
+ if c := addr.To4(); c != nil {
+ return c, nil
+ }
+ if len(addr) == net.IPv6len {
+ return nil, errors.New("address is IPv6")
+ }
+ return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len)
+}
+
+func (ip *IPv4) AddressTo4() error {
+ var src, dst net.IP
+
+ if addr, err := checkIPv4Address(ip.SrcIP); err != nil {
+ return fmt.Errorf("Invalid source IPv4 address (%s)", err)
+ } else {
+ src = addr
+ }
+ if addr, err := checkIPv4Address(ip.DstIP); err != nil {
+ return fmt.Errorf("Invalid destination IPv4 address (%s)", err)
+ } else {
+ dst = addr
+ }
+ ip.SrcIP = src
+ ip.DstIP = dst
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/google/gopacket/layers/ip6.go
new file mode 100644
index 0000000..70e9c8d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip6.go
@@ -0,0 +1,707 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
const (
	// IPv6HopByHopOptionJumbogram code as defined in RFC 2675
	IPv6HopByHopOptionJumbogram = 0xC2
)

const (
	// ipv6MaxPayloadLength is the largest payload expressible in the 16-bit
	// IPv6 payload-length field; larger payloads need a jumbogram option.
	ipv6MaxPayloadLength = 65535
)
+
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
	// http://www.networksorcery.com/enp/protocol/ipv6.htm
	BaseLayer
	Version      uint8      // high nibble of byte 0
	TrafficClass uint8
	FlowLabel    uint32     // low 20 bits of the first word
	Length       uint16     // payload length; 0 for jumbograms
	NextHeader   IPProtocol
	HopLimit     uint8
	SrcIP        net.IP
	DstIP        net.IP
	HopByHop     *IPv6HopByHop // set when NextHeader is hop-by-hop
	// hbh will be pointed to by HopByHop if that layer exists.
	hbh IPv6HopByHop
}

// LayerType returns LayerTypeIPv6
func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }

// NetworkFlow returns this new Flow (EndpointIPv6, SrcIP, DstIP)
func (ipv6 *IPv6) NetworkFlow() gopacket.Flow {
	return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP)
}
+
+// Search for Jumbo Payload TLV in IPv6HopByHop and return (length, true) if found
+func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) {
+ var tlv *IPv6HopByHopOption
+
+ for _, t := range hopopts.Options {
+ if t.OptionType == IPv6HopByHopOptionJumbogram {
+ tlv = t
+ break
+ }
+ }
+ if tlv == nil {
+ // Not found
+ return 0, false, nil
+ }
+ if len(tlv.OptionData) != 4 {
+ return 0, false, errors.New("Jumbo length TLV data must have length 4")
+ }
+ l := binary.BigEndian.Uint32(tlv.OptionData)
+ if l <= ipv6MaxPayloadLength {
+ return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1)
+ }
+ // Found
+ return l, true, nil
+}
+
+// Adds zero-valued Jumbo TLV to IPv6 header if it does not exist
+// (if necessary add hop-by-hop header)
+func addIPv6JumboOption(ip6 *IPv6) {
+ var tlv *IPv6HopByHopOption
+
+ if ip6.HopByHop == nil {
+ // Add IPv6 HopByHop
+ ip6.HopByHop = &IPv6HopByHop{}
+ ip6.HopByHop.NextHeader = ip6.NextHeader
+ ip6.HopByHop.HeaderLength = 0
+ ip6.NextHeader = IPProtocolIPv6HopByHop
+ }
+ for _, t := range ip6.HopByHop.Options {
+ if t.OptionType == IPv6HopByHopOptionJumbogram {
+ tlv = t
+ break
+ }
+ }
+ if tlv == nil {
+ // Add Jumbo TLV
+ tlv = &IPv6HopByHopOption{}
+ ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv)
+ }
+ tlv.SetJumboLength(0)
+}
+
+// Set jumbo length in serialized IPv6 payload (starting with HopByHop header)
+func setIPv6PayloadJumboLength(hbh []byte) error {
+ pLen := len(hbh)
+ if pLen < 8 {
+ //HopByHop is minimum 8 bytes
+ return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen)
+ }
+ hbhLen := int((hbh[1] + 1) * 8)
+ if hbhLen > pLen {
+ return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d", hbhLen, pLen)
+ }
+ offset := 2 //start with options
+ for offset < hbhLen {
+ opt := hbh[offset]
+ if opt == 0 {
+ //Pad1
+ offset++
+ continue
+ }
+ optLen := int(hbh[offset+1])
+ if opt == IPv6HopByHopOptionJumbogram {
+ if optLen == 4 {
+ binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen))
+ return nil
+ }
+ return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen)
+ }
+ offset += 2 + optLen
+ }
+ return errors.New("Jumbo TLV not found")
+}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
// Payloads above 65535 bytes require a hop-by-hop Jumbo Payload option
// (added automatically when opts.FixLengths is set).
func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var jumbo bool
	var err error

	payload := b.Bytes()
	pLen := len(payload)
	if pLen > ipv6MaxPayloadLength {
		jumbo = true
		if opts.FixLengths {
			// We need to set the length later because the hop-by-hop header may
			// not exist or else need padding, so pLen may yet change
			addIPv6JumboOption(ipv6)
		} else if ipv6.HopByHop == nil {
			return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen)
		} else {
			_, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
			if err != nil {
				return err
			}
			if !ok {
				return errors.New("Missing jumbo length hop-by-hop option")
			}
		}
	}

	// Avoid serializing the hop-by-hop header twice if the caller already
	// serialized it as its own layer.
	hbhAlreadySerialized := false
	if ipv6.HopByHop != nil {
		for _, l := range b.Layers() {
			if l == LayerTypeIPv6HopByHop {
				hbhAlreadySerialized = true
				break
			}
		}
	}
	if ipv6.HopByHop != nil && !hbhAlreadySerialized {
		if ipv6.NextHeader != IPProtocolIPv6HopByHop {
			// Just fix it instead of throwing an error
			ipv6.NextHeader = IPProtocolIPv6HopByHop
		}
		err = ipv6.HopByHop.SerializeTo(b, opts)
		if err != nil {
			return err
		}
		payload = b.Bytes()
		pLen = len(payload)
		if opts.FixLengths && jumbo {
			// Patch the final payload length into the jumbo TLV in place.
			err := setIPv6PayloadJumboLength(payload)
			if err != nil {
				return err
			}
		}
	}

	if !jumbo && pLen > ipv6MaxPayloadLength {
		return errors.New("Cannot fit payload into IPv6 header")
	}
	bytes, err := b.PrependBytes(40)
	if err != nil {
		return err
	}
	// First word: Version(4) | TrafficClass(8) | FlowLabel(20).
	bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4)
	bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16)
	binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel))
	if opts.FixLengths {
		if jumbo {
			// Jumbograms carry 0 in the 16-bit length field (RFC 2675).
			ipv6.Length = 0
		} else {
			ipv6.Length = uint16(pLen)
		}
	}
	binary.BigEndian.PutUint16(bytes[4:], ipv6.Length)
	bytes[6] = byte(ipv6.NextHeader)
	bytes[7] = byte(ipv6.HopLimit)
	if err := ipv6.AddressTo16(); err != nil {
		return err
	}
	copy(bytes[8:], ipv6.SrcIP)
	copy(bytes[24:], ipv6.DstIP)
	return nil
}
+
// DecodeFromBytes implementation according to gopacket.DecodingLayer.
// Parses the fixed 40-byte header; a hop-by-hop extension, when present, is
// decoded inline (its options determine jumbogram payload lengths).
func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 40 {
		df.SetTruncated()
		return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data))
	}
	ipv6.Version = uint8(data[0]) >> 4
	// TrafficClass straddles bytes 0-1; FlowLabel is the low 20 bits of the
	// first 32-bit word.
	ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
	ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
	ipv6.Length = binary.BigEndian.Uint16(data[4:6])
	ipv6.NextHeader = IPProtocol(data[6])
	ipv6.HopLimit = data[7]
	ipv6.SrcIP = data[8:24]
	ipv6.DstIP = data[24:40]
	ipv6.HopByHop = nil
	ipv6.BaseLayer = BaseLayer{data[:40], data[40:]}

	// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
	// options are crucial for understanding what's actually happening per packet.
	if ipv6.NextHeader == IPProtocolIPv6HopByHop {
		err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df)
		if err != nil {
			return err
		}
		ipv6.HopByHop = &ipv6.hbh
		pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
		if err != nil {
			return err
		}
		if jumbo && ipv6.Length == 0 {
			// Jumbogram: the 32-bit TLV value replaces the 16-bit Length.
			pEnd := int(pEnd)
			if pEnd > len(ipv6.Payload) {
				df.SetTruncated()
				pEnd = len(ipv6.Payload)
			}
			ipv6.Payload = ipv6.Payload[:pEnd]
			return nil
		} else if jumbo && ipv6.Length != 0 {
			return errors.New("IPv6 has jumbo length and IPv6 length is not 0")
		} else if !jumbo && ipv6.Length == 0 {
			return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option")
		} else {
			// Ordinary packet: skip past the hop-by-hop header.
			ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:]
		}
	}

	if ipv6.Length == 0 {
		return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader)
	}

	pEnd := int(ipv6.Length)
	if pEnd > len(ipv6.Payload) {
		df.SetTruncated()
		pEnd = len(ipv6.Payload)
	}
	ipv6.Payload = ipv6.Payload[:pEnd]

	return nil
}
+
// CanDecode implementation according to gopacket.DecodingLayer
func (ipv6 *IPv6) CanDecode() gopacket.LayerClass {
	return LayerTypeIPv6
}

// NextLayerType implementation according to gopacket.DecodingLayer.
// When a hop-by-hop header was decoded inline, the payload's type comes from
// that header's NextHeader field rather than the fixed header's.
func (ipv6 *IPv6) NextLayerType() gopacket.LayerType {
	if ipv6.HopByHop != nil {
		return ipv6.HopByHop.NextHeader.LayerType()
	}
	return ipv6.NextHeader.LayerType()
}
+
// decodeIPv6 decodes data as an IPv6 packet and registers it as the network
// layer; an inline-decoded hop-by-hop header is added as its own layer. The
// IPv6 layer is added even on error so partial decodes remain visible.
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
	ip6 := &IPv6{}
	err := ip6.DecodeFromBytes(data, p)
	p.AddLayer(ip6)
	p.SetNetworkLayer(ip6)
	if ip6.HopByHop != nil {
		p.AddLayer(ip6.HopByHop)
	}
	if err != nil {
		return err
	}
	return p.NextDecoder(ip6.NextLayerType())
}
+
// ipv6HeaderTLVOption is a type-length-value option as carried by the
// hop-by-hop and destination-options extension headers.
type ipv6HeaderTLVOption struct {
	OptionType, OptionLength uint8
	ActualLength             int
	OptionData               []byte
	OptionAlignment          [2]uint8 // Xn+Y = [2]uint8{X, Y}
}

// serializeTo writes the TLV (type byte, length byte, data) into data and
// returns the number of bytes the encoding occupies. With fixLengths the
// length byte is recomputed from OptionData; with dryrun nothing is written
// and only the size is computed.
func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int {
	if fixLengths {
		h.OptionLength = uint8(len(h.OptionData))
	}
	total := 2 + int(h.OptionLength)
	if dryrun {
		return total
	}
	data[0] = h.OptionType
	data[1] = h.OptionLength
	copy(data[2:], h.OptionData)
	return total
}
+
+func decodeIPv6HeaderTLVOption(data []byte) (h *ipv6HeaderTLVOption) {
+ h = &ipv6HeaderTLVOption{}
+ if data[0] == 0 {
+ h.ActualLength = 1
+ return
+ }
+ h.OptionType = data[0]
+ h.OptionLength = data[1]
+ h.ActualLength = int(h.OptionLength) + 2
+ h.OptionData = data[2:h.ActualLength]
+ return
+}
+
// serializeTLVOptionPadding writes padLength bytes of padding at the start
// of data: Pad1 (a single zero byte) for one byte, or a PadN TLV otherwise.
// Non-positive padLength is a no-op.
func serializeTLVOptionPadding(data []byte, padLength int) {
	switch {
	case padLength <= 0:
		return
	case padLength == 1:
		// Pad1: a lone zero byte.
		data[0] = 0x0
	default:
		// PadN: type 1, length N-2, then zero bytes. (As in the original
		// implementation, everything after the length byte is zeroed; the
		// bytes beyond the padding are overwritten by later options.)
		data[0] = 0x1
		n := uint8(padLength) - 2
		data[1] = n
		if n != 0 {
			for idx := 2; idx < len(data); idx++ {
				data[idx] = 0x0
			}
		}
	}
}
+
+// If buf is 'nil' do a serialize dry run
+func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int {
+ var l int
+
+ dryrun := buf == nil
+ length := 2
+ for _, opt := range options {
+ if fixLengths {
+ x := int(opt.OptionAlignment[0])
+ y := int(opt.OptionAlignment[1])
+ if x != 0 {
+ n := length / x
+ offset := x*n + y
+ if offset < length {
+ offset += x
+ }
+ if length != offset {
+ pad := offset - length
+ if !dryrun {
+ serializeTLVOptionPadding(buf[length-2:], pad)
+ }
+ length += pad
+ }
+ }
+ }
+ if dryrun {
+ l = opt.serializeTo(nil, fixLengths, true)
+ } else {
+ l = opt.serializeTo(buf[length-2:], fixLengths, false)
+ }
+ length += l
+ }
+ if fixLengths {
+ pad := length % 8
+ if pad != 0 {
+ if !dryrun {
+ serializeTLVOptionPadding(buf[length-2:], pad)
+ }
+ length += pad
+ }
+ }
+ return length - 2
+}
+
// ipv6ExtensionBase is the common prefix shared by IPv6 extension headers:
// a next-header byte and a length byte counting 8-byte units beyond the
// first 8 bytes (so ActualLength = HeaderLength*8 + 8).
type ipv6ExtensionBase struct {
	BaseLayer
	NextHeader   IPProtocol
	HeaderLength uint8
	ActualLength int
}
+
+func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data))
+ }
+ i.NextHeader = IPProtocol(data[0])
+ i.HeaderLength = data[1]
+ i.ActualLength = int(i.HeaderLength)*8 + 8
+ if len(data) < i.ActualLength {
+ return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength)
+ }
+ i.Contents = data[:i.ActualLength]
+ i.Payload = data[i.ActualLength:]
+ return
+}
+
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
	NextHeader IPProtocol // protocol of the layer following the skipped extension
	BaseLayer
}

// DecodeFromBytes implementation according to gopacket.DecodingLayer.
// It records only the extension's span and next-header value.
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	extension, err := decodeIPv6ExtensionBase(data, df)
	if err != nil {
		return err
	}
	i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
	i.NextHeader = extension.NextHeader
	return nil
}

// CanDecode implementation according to gopacket.DecodingLayer
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
	return LayerClassIPv6Extension
}

// NextLayerType implementation according to gopacket.DecodingLayer
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
	return i.NextHeader.LayerType()
}
+
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption

// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
	ipv6ExtensionBase
	Options []*IPv6HopByHopOption
}

// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
+
// SerializeTo implementation according to gopacket.SerializableLayer.
// Serializes the options (via a dry run to size them first), then prepends
// the two fixed bytes. The finished header must be a multiple of 8 bytes.
func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var bytes []byte
	var err error

	// View the options through the shared TLV type used by the serializer.
	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
	for _, v := range i.Options {
		o = append(o, (*ipv6HeaderTLVOption)(v))
	}

	// Dry run computes the exact byte count before prepending.
	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
	bytes, err = b.PrependBytes(l)
	if err != nil {
		return err
	}
	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)

	length := len(bytes) + 2
	if length%8 != 0 {
		return errors.New("IPv6HopByHop actual length must be multiple of 8")
	}
	bytes, err = b.PrependBytes(2)
	if err != nil {
		return err
	}
	bytes[0] = uint8(i.NextHeader)
	if opts.FixLengths {
		// Hdr Ext Len counts 8-byte units beyond the first.
		i.HeaderLength = uint8((length / 8) - 1)
	}
	bytes[1] = uint8(i.HeaderLength)
	return nil
}
+
// DecodeFromBytes implementation according to gopacket.DecodingLayer.
// Parses the shared extension prefix, then walks the TLV options that fill
// the rest of the header.
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	var err error
	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
	if err != nil {
		return err
	}
	offset := 2 // options start after NextHeader and HdrExtLen
	for offset < i.ActualLength {
		opt := decodeIPv6HeaderTLVOption(data[offset:])
		i.Options = append(i.Options, (*IPv6HopByHopOption)(opt))
		offset += opt.ActualLength
	}
	return nil
}
+
// decodeIPv6HopByHop decodes a standalone hop-by-hop extension layer and
// chains to the decoder for its NextHeader. The layer is added even on
// error so partial decodes remain visible.
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
	i := &IPv6HopByHop{}
	err := i.DecodeFromBytes(data, p)
	p.AddLayer(i)
	if err != nil {
		return err
	}
	return p.NextDecoder(i.NextHeader)
}
+
+// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length
+func (o *IPv6HopByHopOption) SetJumboLength(len uint32) {
+ o.OptionType = IPv6HopByHopOptionJumbogram
+ o.OptionLength = 4
+ o.ActualLength = 6
+ if o.OptionData == nil {
+ o.OptionData = make([]byte, 4)
+ }
+ binary.BigEndian.PutUint32(o.OptionData, len)
+ o.OptionAlignment = [2]uint8{4, 2}
+}
+
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
	ipv6ExtensionBase
	RoutingType  uint8 // byte 2 of the header; only type 0 is decoded
	SegmentsLeft uint8
	// This segment is supposed to be zero according to RFC2460, the second set of
	// 4 bytes in the extension.
	Reserved []byte
	// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
	// set only if RoutingType == 0.
	SourceRoutingIPs []net.IP
}

// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
+
// decodeIPv6Routing decodes an IPv6 routing extension header. Only routing
// type 0 (source routing) payloads are parsed; other types are rejected.
// decodeIPv6ExtensionBase guarantees len(data) >= ActualLength >= 8, so the
// fixed-field indexing below is safe.
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
	base, err := decodeIPv6ExtensionBase(data, p)
	if err != nil {
		return err
	}
	i := &IPv6Routing{
		ipv6ExtensionBase: base,
		RoutingType:       data[2],
		SegmentsLeft:      data[3],
		Reserved:          data[4:8],
	}
	switch i.RoutingType {
	case 0: // Source routing
		// After the 8 fixed bytes, the body must be whole 16-byte addresses.
		if (i.ActualLength-8)%16 != 0 {
			return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength)
		}
		for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
			i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
		}
	default:
		return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType)
	}
	p.AddLayer(i)
	return p.NextDecoder(i.NextHeader)
}
+
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
	BaseLayer
	NextHeader IPProtocol
	// Reserved1 is bits [8-16), from least to most significant, 0-indexed
	Reserved1      uint8
	FragmentOffset uint16 // in 8-byte units (top 13 bits of bytes 2-3)
	// Reserved2 is bits [29-31), from least to most significant, 0-indexed
	Reserved2      uint8
	MoreFragments  bool // M flag, lowest bit of byte 3
	Identification uint32
}

// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
+
// decodeIPv6Fragment decodes the fixed 8-byte IPv6 fragment header and hands
// the remaining bytes to gopacket's fragment handling.
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
	if len(data) < 8 {
		p.SetTruncated()
		return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data))
	}
	i := &IPv6Fragment{
		BaseLayer:  BaseLayer{data[:8], data[8:]},
		NextHeader: IPProtocol(data[0]),
		Reserved1:  data[1],
		// Bytes 2-3: FragmentOffset(13 bits) | Reserved2(2 bits) | M flag(1).
		FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
		Reserved2:      data[3] & 0x6 >> 1,
		MoreFragments:  data[3]&0x1 != 0,
		Identification: binary.BigEndian.Uint32(data[4:8]),
	}
	p.AddLayer(i)
	return p.NextDecoder(gopacket.DecodeFragment)
}
+
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption

// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
	ipv6ExtensionBase
	Options []*IPv6DestinationOption
}

// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
+
// DecodeFromBytes implementation according to gopacket.DecodingLayer.
// Parses the shared extension prefix, then walks the TLV options that fill
// the rest of the header.
func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	var err error
	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
	if err != nil {
		return err
	}
	offset := 2 // options start after NextHeader and HdrExtLen
	for offset < i.ActualLength {
		opt := decodeIPv6HeaderTLVOption(data[offset:])
		i.Options = append(i.Options, (*IPv6DestinationOption)(opt))
		offset += opt.ActualLength
	}
	return nil
}
+
// decodeIPv6Destination decodes a destination-options extension layer and
// chains to the decoder for its NextHeader. The layer is added even on
// error so partial decodes remain visible.
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
	i := &IPv6Destination{}
	err := i.DecodeFromBytes(data, p)
	p.AddLayer(i)
	if err != nil {
		return err
	}
	return p.NextDecoder(i.NextHeader)
}
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
// Mirrors IPv6HopByHop.SerializeTo: dry-run sizing, option serialization,
// then the two fixed bytes; total length must be a multiple of 8.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var bytes []byte
	var err error

	// View the options through the shared TLV type used by the serializer.
	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
	for _, v := range i.Options {
		o = append(o, (*ipv6HeaderTLVOption)(v))
	}

	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
	bytes, err = b.PrependBytes(l)
	if err != nil {
		return err
	}
	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)

	length := len(bytes) + 2
	if length%8 != 0 {
		return errors.New("IPv6Destination actual length must be multiple of 8")
	}
	bytes, err = b.PrependBytes(2)
	if err != nil {
		return err
	}
	bytes[0] = uint8(i.NextHeader)
	if opts.FixLengths {
		// Hdr Ext Len counts 8-byte units beyond the first.
		i.HeaderLength = uint8((length / 8) - 1)
	}
	bytes[1] = uint8(i.HeaderLength)
	return nil
}
+
+func checkIPv6Address(addr net.IP) error {
+ if len(addr) == net.IPv6len {
+ return nil
+ }
+ if len(addr) == net.IPv4len {
+ return errors.New("address is IPv4")
+ }
+ return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len)
+}
+
+// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 16 byte addresses)
+func (ipv6 *IPv6) AddressTo16() error {
+ if err := checkIPv6Address(ipv6.SrcIP); err != nil {
+ return fmt.Errorf("Invalid source IPv6 address (%s)", err)
+ }
+ if err := checkIPv6Address(ipv6.DstIP); err != nil {
+ return fmt.Errorf("Invalid destination IPv6 address (%s)", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/google/gopacket/layers/ipsec.go
new file mode 100644
index 0000000..19163fa
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ipsec.go
@@ -0,0 +1,68 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// IPSecAH is the authentication header for IPv4/6 defined in
+// http://tools.ietf.org/html/rfc2402
+type IPSecAH struct {
+ // While the auth header can be used for both IPv4 and v6, its format is that of
+ // an IPv6 extension (NextHeader, PayloadLength, etc...), so we use ipv6ExtensionBase
+ // to build it.
+ ipv6ExtensionBase
+ Reserved uint16
+ SPI, Seq uint32
+ AuthenticationData []byte
+}
+
+// LayerType returns LayerTypeIPSecAH.
+func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH }
+
+func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPSecAH{
+ ipv6ExtensionBase: ipv6ExtensionBase{
+ NextHeader: IPProtocol(data[0]),
+ HeaderLength: data[1],
+ },
+ Reserved: binary.BigEndian.Uint16(data[2:4]),
+ SPI: binary.BigEndian.Uint32(data[4:8]),
+ Seq: binary.BigEndian.Uint32(data[8:12]),
+ }
+ i.ActualLength = (int(i.HeaderLength) + 2) * 4
+ i.AuthenticationData = data[12:i.ActualLength]
+ i.Contents = data[:i.ActualLength]
+ i.Payload = data[i.ActualLength:]
+ p.AddLayer(i)
+ return p.NextDecoder(i.NextHeader)
+}
+
+// IPSecESP is the encapsulating security payload defined in
+// http://tools.ietf.org/html/rfc2406
+type IPSecESP struct {
+ BaseLayer
+ SPI, Seq uint32
+ // Encrypted contains the encrypted set of bytes sent in an ESP
+ Encrypted []byte
+}
+
+// LayerType returns LayerTypeIPSecESP.
+func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP }
+
+func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPSecESP{
+ BaseLayer: BaseLayer{data, nil},
+ SPI: binary.BigEndian.Uint32(data[:4]),
+ Seq: binary.BigEndian.Uint32(data[4:8]),
+ Encrypted: data[8:],
+ }
+ p.AddLayer(i)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/google/gopacket/layers/layertypes.go
new file mode 100644
index 0000000..56fdb5a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/layertypes.go
@@ -0,0 +1,218 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+var (
+ LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)})
+ LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)})
+ LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)})
+ LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil})
+ LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil})
+ LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)})
+ LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)})
+ LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)})
+ LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)})
+ LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)})
+ LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)})
+ LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)})
+ LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)})
+ LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)})
+ LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)})
+ LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)})
+ LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)})
+ LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)})
+ LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)})
+ LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil})
+ LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil})
+ LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil})
+ LayerTypeSCTPSack = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil})
+ LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil})
+ LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil})
+ LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil})
+ LayerTypeSCTPShutdownAck = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil})
+ LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil})
+ LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil})
+ LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil})
+ LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil})
+ LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil})
+ LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil})
+ LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil})
+ LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)})
+ LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)})
+ LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)})
+ LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)})
+ LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)})
+ LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)})
+ LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)})
+ LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)})
+ LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)})
+ LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)})
+ LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)})
+ LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)})
+ LayerTypeEAPOL = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)})
+ LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)})
+ LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)})
+ LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)})
+ LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil})
+ LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)})
+ LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)})
+ LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)})
+ LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)})
+ LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)})
+ LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)})
+ LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)})
+ LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+ LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+ LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+ LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)})
+ LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+ LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+ LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+ LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)})
+ LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)})
+ LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)})
+ LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)})
+ LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)})
+ LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)})
+ LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)})
+ LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)})
+ LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)})
+ LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)})
+ LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)})
+ LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)})
+ LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)})
+ LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)})
+ LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)})
+ LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)})
+ LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)})
+ LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)})
+ LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)})
+ LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)})
+ LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)})
+ LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)})
+ LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)})
+ LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)})
+ LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)})
+ LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)})
+ LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)})
+ LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)})
+ LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)})
+ LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)})
+ LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)})
+ LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)})
+ LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)})
+ LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)})
+ LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)})
+ LayerTypeUSBControl = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)})
+ LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)})
+ LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)})
+ LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)})
+ LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)})
+ LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)})
+ LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)})
+ LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)})
+ LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)})
+ LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)})
+ LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)})
+ LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)})
+ LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)})
+ LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)})
+ LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)})
+ LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)})
+ LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)})
+ LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)})
+ LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)})
+ LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)})
+ LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)})
+ LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)})
+ LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)})
+ LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)})
+ LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)})
+ LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)})
+ LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)})
+ LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)})
+ LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)})
+ LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)})
+ LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)})
+ LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)})
+)
+
+var (
+ // LayerClassIPNetwork contains TCP/IP network layer types.
+ LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPv4,
+ LayerTypeIPv6,
+ })
+ // LayerClassIPTransport contains TCP/IP transport layer types.
+ LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeTCP,
+ LayerTypeUDP,
+ LayerTypeSCTP,
+ })
+ // LayerClassIPControl contains TCP/IP control protocols.
+ LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeICMPv4,
+ LayerTypeICMPv6,
+ })
+ // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP
+ // layer).
+ LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeSCTPUnknownChunkType,
+ LayerTypeSCTPData,
+ LayerTypeSCTPInit,
+ LayerTypeSCTPSack,
+ LayerTypeSCTPHeartbeat,
+ LayerTypeSCTPError,
+ LayerTypeSCTPShutdown,
+ LayerTypeSCTPShutdownAck,
+ LayerTypeSCTPCookieEcho,
+ LayerTypeSCTPEmptyLayer,
+ LayerTypeSCTPInitAck,
+ LayerTypeSCTPHeartbeatAck,
+ LayerTypeSCTPAbort,
+ LayerTypeSCTPShutdownComplete,
+ LayerTypeSCTPCookieAck,
+ })
+ // LayerClassIPv6Extension contains IPv6 extension headers.
+ LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPv6HopByHop,
+ LayerTypeIPv6Routing,
+ LayerTypeIPv6Fragment,
+ LayerTypeIPv6Destination,
+ })
+ LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPSecAH,
+ LayerTypeIPSecESP,
+ })
+ // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol
+ // messages.
+ LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeICMPv6RouterSolicitation,
+ LayerTypeICMPv6RouterAdvertisement,
+ LayerTypeICMPv6NeighborSolicitation,
+ LayerTypeICMPv6NeighborAdvertisement,
+ LayerTypeICMPv6Redirect,
+ })
+ // LayerClassMLDv1 contains multicast listener discovery protocol
+ LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeMLDv1MulticastListenerQuery,
+ LayerTypeMLDv1MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerDone,
+ })
+ // LayerClassMLDv2 contains multicast listener discovery protocol v2
+ LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeMLDv1MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerDone,
+ LayerTypeMLDv2MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerQuery,
+ LayerTypeMLDv2MulticastListenerQuery,
+ })
+)
diff --git a/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/google/gopacket/layers/lcm.go
new file mode 100644
index 0000000..5fe9fa5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lcm.go
@@ -0,0 +1,213 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // LCMShortHeaderMagic is the LCM small message header magic number
+ LCMShortHeaderMagic uint32 = 0x4c433032
+ // LCMFragmentedHeaderMagic is the LCM fragmented message header magic number
+ LCMFragmentedHeaderMagic uint32 = 0x4c433033
+)
+
+// LCM (Lightweight Communications and Marshalling) is a set of libraries and
+// tools for message passing and data marshalling, targeted at real-time systems
+// where high-bandwidth and low latency are critical. It provides a
+// publish/subscribe message passing model and automatic
+// marshalling/unmarshalling code generation with bindings for applications in a
+// variety of programming languages.
+//
+// References
+// https://lcm-proj.github.io/
+// https://github.com/lcm-proj/lcm
+type LCM struct {
+ // Common (short & fragmented header) fields
+ Magic uint32
+ SequenceNumber uint32
+ // Fragmented header only fields
+ PayloadSize uint32
+ FragmentOffset uint32
+ FragmentNumber uint16
+ TotalFragments uint16
+ // Common field
+ ChannelName string
+ // Gopacket helper fields
+ Fragmented bool
+ fingerprint LCMFingerprint
+ contents []byte
+ payload []byte
+}
+
+// LCMFingerprint is the type of a LCM fingerprint.
+type LCMFingerprint uint64
+
+var (
+ // lcmLayerTypes contains a map of all LCM fingerprints that we support and
+ // their LayerType
+ lcmLayerTypes = map[LCMFingerprint]gopacket.LayerType{}
+ layerTypeIndex = 1001
+)
+
+// RegisterLCMLayerType allows users to register decoders for the underlying
+// LCM payload. This is done based on the fingerprint that every LCM message
+// contains and which identifies it uniquely. If num is not the zero value it
+// will be used when registering with RegisterLayerType towards gopacket,
+// otherwise an incremental value starting from 1001 will be used.
+func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint,
+ decoder gopacket.Decoder) gopacket.LayerType {
+ metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder}
+
+ if num == 0 {
+ num = layerTypeIndex
+ layerTypeIndex++
+ }
+
+ lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata)
+
+ return lcmLayerTypes[fingerprint]
+}
+
+// SupportedLCMFingerprints returns a slice of all LCM fingerprints that has
+// been registered so far.
+func SupportedLCMFingerprints() []LCMFingerprint {
+ fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes))
+ for fp := range lcmLayerTypes {
+ fingerprints = append(fingerprints, fp)
+ }
+ return fingerprints
+}
+
+// GetLCMLayerType returns the underlying LCM message's LayerType.
+// This LayerType has to be registered by using RegisterLCMLayerType.
+func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType {
+ layerType, ok := lcmLayerTypes[fingerprint]
+ if !ok {
+ return gopacket.LayerTypePayload
+ }
+
+ return layerType
+}
+
+func decodeLCM(data []byte, p gopacket.PacketBuilder) error {
+ lcm := &LCM{}
+
+ err := lcm.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ p.AddLayer(lcm)
+ p.SetApplicationLayer(lcm)
+
+ return p.NextDecoder(lcm.NextLayerType())
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ offset := 0
+
+ lcm.Magic = binary.BigEndian.Uint32(data[offset:4])
+ offset += 4
+
+ if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic {
+ return fmt.Errorf("Received LCM header magic %v does not match know "+
+ "LCM magic numbers. Dropping packet.", lcm.Magic)
+ }
+
+ lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8])
+ offset += 4
+
+ if lcm.Magic == LCMFragmentedHeaderMagic {
+ lcm.Fragmented = true
+
+ lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+
+ lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+
+ lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2])
+ offset += 2
+
+ lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ } else {
+ lcm.Fragmented = false
+ }
+
+ if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+ buffer := make([]byte, 0)
+ for _, b := range data[offset:] {
+ offset++
+
+ if b == 0 {
+ break
+ }
+
+ buffer = append(buffer, b)
+ }
+
+ lcm.ChannelName = string(buffer)
+ }
+
+ lcm.fingerprint = LCMFingerprint(
+ binary.BigEndian.Uint64(data[offset : offset+8]))
+
+ lcm.contents = data[:offset]
+ lcm.payload = data[offset:]
+
+ return nil
+}
+
+// CanDecode returns a set of layers that LCM objects can decode.
+// As LCM objects can only decode the LCM layer, we just return that layer.
+func (lcm LCM) CanDecode() gopacket.LayerClass {
+ return LayerTypeLCM
+}
+
+// NextLayerType specifies the LCM payload layer type following this header.
+// As LCM packets are serialized structs with uniq fingerprints for each uniq
+// combination of data types, lookup of correct layer type is based on that
+// fingerprint.
+func (lcm LCM) NextLayerType() gopacket.LayerType {
+ if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+ return GetLCMLayerType(lcm.fingerprint)
+ }
+
+ return gopacket.LayerTypeFragment
+}
+
+// LayerType returns LayerTypeLCM
+func (lcm LCM) LayerType() gopacket.LayerType {
+ return LayerTypeLCM
+}
+
+// LayerContents returns the contents of the LCM header.
+func (lcm LCM) LayerContents() []byte {
+ return lcm.contents
+}
+
+// LayerPayload returns the payload following this LCM header.
+func (lcm LCM) LayerPayload() []byte {
+ return lcm.payload
+}
+
+// Payload returns the payload following this LCM header.
+func (lcm LCM) Payload() []byte {
+ return lcm.LayerPayload()
+}
+
+// Fingerprint returns the LCM fingerprint of the underlying message.
+func (lcm LCM) Fingerprint() LCMFingerprint {
+ return lcm.fingerprint
+}
diff --git a/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/google/gopacket/layers/linux_sll.go
new file mode 100644
index 0000000..85a4f8b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/linux_sll.go
@@ -0,0 +1,98 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
// LinuxSLLPacketType is the "packet type" field of a Linux cooked-capture
// (SLL) header, describing where the packet was headed.
type LinuxSLLPacketType uint16

const (
	LinuxSLLPacketTypeHost      LinuxSLLPacketType = 0 // To us
	LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all
	LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group
	LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else
	LinuxSLLPacketTypeOutgoing  LinuxSLLPacketType = 4 // Outgoing of any type
	// These ones are invisible by user level
	LinuxSLLPacketTypeLoopback  LinuxSLLPacketType = 5 // MC/BRD frame looped back
	LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame
)

// String returns the human-readable name of the packet type, or
// "Unknown(n)" for values with no defined name.
func (l LinuxSLLPacketType) String() string {
	names := [...]string{
		"host",
		"broadcast",
		"multicast",
		"otherhost",
		"outgoing",
		"loopback",
		"fastroute",
	}
	if int(l) < len(names) {
		return names[l]
	}
	return fmt.Sprintf("Unknown(%d)", int(l))
}
+
// LinuxSLL is the Linux cooked-capture ("sll") pseudo link-layer header.
type LinuxSLL struct {
	BaseLayer
	PacketType   LinuxSLLPacketType // where the packet was headed (see LinuxSLLPacketType values)
	AddrLen      uint16             // number of meaningful bytes in Addr
	Addr         net.HardwareAddr   // sender's link-layer address
	EthernetType EthernetType       // protocol type of the encapsulated payload
	AddrType     uint16             // link-layer device type (presumably ARPHRD_*; not interpreted here — confirm)
}
+
+// LayerType returns LayerTypeLinuxSLL.
+func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL }
+
+func (sll *LinuxSLL) CanDecode() gopacket.LayerClass {
+ return LayerTypeLinuxSLL
+}
+
+func (sll *LinuxSLL) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, sll.Addr, nil)
+}
+
+func (sll *LinuxSLL) NextLayerType() gopacket.LayerType {
+ return sll.EthernetType.LayerType()
+}
+
+func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 16 {
+ return errors.New("Linux SLL packet too small")
+ }
+ sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2]))
+ sll.AddrType = binary.BigEndian.Uint16(data[2:4])
+ sll.AddrLen = binary.BigEndian.Uint16(data[4:6])
+
+ sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6])
+ sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16]))
+ sll.BaseLayer = BaseLayer{data[:16], data[16:]}
+
+ return nil
+}
+
// decodeLinuxSLL is the gopacket decoder hook for Linux SLL headers: it
// decodes one header, registers it as the packet's link layer, and hands
// the remainder of the packet to the decoder for the encapsulated protocol.
func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error {
	sll := &LinuxSLL{}
	if err := sll.DecodeFromBytes(data, p); err != nil {
		return err
	}
	p.AddLayer(sll)
	p.SetLinkLayer(sll)
	return p.NextDecoder(sll.EthernetType)
}
diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go
new file mode 100644
index 0000000..cad6803
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/llc.go
@@ -0,0 +1,193 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
// LLC is the layer used for 802.2 Logical Link Control headers.
// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf
type LLC struct {
	BaseLayer
	DSAP    uint8  // destination service access point, low (I/G) bit stripped
	IG      bool   // true means group, false means individual
	SSAP    uint8  // source service access point, low (C/R) bit stripped
	CR      bool   // true means response, false means command
	Control uint16 // control field; 8 or 16 bits depending on frame format (see DecodeFromBytes)
}

// LayerType returns gopacket.LayerTypeLLC.
func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC }
+
// DecodeFromBytes decodes the given bytes into this layer.
//
// The DSAP and SSAP bytes each carry a flag in their lowest bit (I/G and
// C/R respectively); the flag is split out into IG/CR and DSAP/SSAP keep
// only the upper 7 bits. The control field is 8 bits wide for U-format
// frames and 16 bits otherwise, detected from the low bits of the first
// control byte.
func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 3 {
		return errors.New("LLC header too small")
	}
	l.DSAP = data[0] & 0xFE
	l.IG = data[0]&0x1 != 0
	l.SSAP = data[1] & 0xFE
	l.CR = data[1]&0x1 != 0
	l.Control = uint16(data[2])

	// Low control bits 0b...0 (I-format) or 0b...01 (S-format) indicate a
	// 16-bit control field; 0b...11 (U-format) keeps the 8-bit field.
	if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 {
		if len(data) < 4 {
			return errors.New("LLC header too small")
		}
		l.Control = l.Control<<8 | uint16(data[3])
		l.Contents = data[:4]
		l.Payload = data[4:]
	} else {
		l.Contents = data[:3]
		l.Payload = data[3:]
	}
	return nil
}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *LLC) CanDecode() gopacket.LayerClass {
+ return LayerTypeLLC
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *LLC) NextLayerType() gopacket.LayerType {
+ switch {
+ case l.DSAP == 0xAA && l.SSAP == 0xAA:
+ return LayerTypeSNAP
+ case l.DSAP == 0x42 && l.SSAP == 0x42:
+ return LayerTypeSTP
+ }
+ return gopacket.LayerTypeZero // Not implemented
+}
+
// SNAP is used inside LLC. See
// http://standards.ieee.org/getieee802/download/802-2001.pdf.
// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol:
// "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing,
// on networks using IEEE 802.2 LLC, more protocols than can be distinguished
// by the 8-bit 802.2 Service Access Point (SAP) fields."
type SNAP struct {
	BaseLayer
	OrganizationalCode []byte       // 3-byte OUI of the organization that defines Type
	Type               EthernetType // protocol ID; treated as an EtherType (see BUG note in decodeSNAP)
}

// LayerType returns gopacket.LayerTypeSNAP.
func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 5 {
+ return errors.New("SNAP header too small")
+ }
+ s.OrganizationalCode = data[:3]
+ s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5]))
+ s.BaseLayer = BaseLayer{data[:5], data[5:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *SNAP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSNAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (s *SNAP) NextLayerType() gopacket.LayerType {
+ // See BUG(gconnel) in decodeSNAP
+ return s.Type.LayerType()
+}
+
+func decodeLLC(data []byte, p gopacket.PacketBuilder) error {
+ l := &LLC{}
+ err := l.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(l)
+ return p.NextDecoder(l.NextLayerType())
+}
+
+func decodeSNAP(data []byte, p gopacket.PacketBuilder) error {
+ s := &SNAP{}
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ // BUG(gconnell): When decoding SNAP, we treat the SNAP type as an Ethernet
+ // type. This may not actually be an ethernet type in all cases,
+ // depending on the organizational code. Right now, we don't check.
+ return p.NextDecoder(s.Type)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var igFlag, crFlag byte
+ var length int
+
+ if l.Control&0xFF00 != 0 {
+ length = 4
+ } else {
+ length = 3
+ }
+
+ if l.DSAP&0x1 != 0 {
+ return errors.New("DSAP value invalid, should not include IG flag bit")
+ }
+
+ if l.SSAP&0x1 != 0 {
+ return errors.New("SSAP value invalid, should not include CR flag bit")
+ }
+
+ if buf, err := b.PrependBytes(length); err != nil {
+ return err
+ } else {
+ igFlag = 0
+ if l.IG {
+ igFlag = 0x1
+ }
+
+ crFlag = 0
+ if l.CR {
+ crFlag = 0x1
+ }
+
+ buf[0] = l.DSAP + igFlag
+ buf[1] = l.SSAP + crFlag
+
+ if length == 4 {
+ buf[2] = uint8(l.Control >> 8)
+ buf[3] = uint8(l.Control)
+ } else {
+ buf[2] = uint8(l.Control)
+ }
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if buf, err := b.PrependBytes(5); err != nil {
+ return err
+ } else {
+ buf[0] = s.OrganizationalCode[0]
+ buf[1] = s.OrganizationalCode[1]
+ buf[2] = s.OrganizationalCode[2]
+ binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/google/gopacket/layers/lldp.go
new file mode 100644
index 0000000..e128260
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lldp.go
@@ -0,0 +1,1585 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet.
type LLDPTLVType byte

// Standard LLDP TLV type codes. End, ChassisID, PortID and TTL are the
// mandatory TLVs enforced by decodeLinkLayerDiscovery; OrgSpecific (127)
// carries organization-specific data.
const (
	LLDPTLVEnd             LLDPTLVType = 0
	LLDPTLVChassisID       LLDPTLVType = 1
	LLDPTLVPortID          LLDPTLVType = 2
	LLDPTLVTTL             LLDPTLVType = 3
	LLDPTLVPortDescription LLDPTLVType = 4
	LLDPTLVSysName         LLDPTLVType = 5
	LLDPTLVSysDescription  LLDPTLVType = 6
	LLDPTLVSysCapabilities LLDPTLVType = 7
	LLDPTLVMgmtAddress     LLDPTLVType = 8
	LLDPTLVOrgSpecific     LLDPTLVType = 127
)

// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer.
type LinkLayerDiscoveryValue struct {
	Type   LLDPTLVType // 7-bit TLV type
	Length uint16      // 9-bit TLV length
	Value  []byte      // raw, undecoded TLV payload
}

// len always reports 0. NOTE(review): looks like dead/placeholder code —
// no caller is visible in this file; confirm before relying on it.
func (c *LinkLayerDiscoveryValue) len() int {
	return 0
}
+
// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID
type LLDPChassisIDSubType byte

// LLDP Chassis Types
const (
	LLDPChassisIDSubTypeReserved    LLDPChassisIDSubType = 0
	LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1
	LLDPChassisIDSubtypeIfaceAlias  LLDPChassisIDSubType = 2
	LLDPChassisIDSubTypePortComp    LLDPChassisIDSubType = 3
	LLDPChassisIDSubTypeMACAddr     LLDPChassisIDSubType = 4
	LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5
	LLDPChassisIDSubtypeIfaceName   LLDPChassisIDSubType = 6
	LLDPChassisIDSubTypeLocal       LLDPChassisIDSubType = 7
)

// LLDPChassisID is the mandatory Chassis ID TLV: a subtype tag plus the
// raw identifier bytes it qualifies.
type LLDPChassisID struct {
	Subtype LLDPChassisIDSubType
	ID      []byte
}

// serialize renders the TLV in wire format: a 16-bit header with the TLV
// type in the top 7 bits and the length in the low 9 bits, followed by
// the subtype byte and the ID bytes.
func (c *LLDPChassisID) serialize() []byte {

	var buf = make([]byte, c.serializedLen())
	idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) //id should take 7 bits, length should take 9 bits, +1 for subtype
	binary.BigEndian.PutUint16(buf[0:2], idLen)
	buf[2] = byte(c.Subtype)
	copy(buf[3:], c.ID)
	return buf
}

// serializedLen reports the on-wire size of the TLV in bytes.
func (c *LLDPChassisID) serializedLen() int {
	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
}
+
// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID
type LLDPPortIDSubType byte

// LLDP PortID types
const (
	LLDPPortIDSubtypeReserved       LLDPPortIDSubType = 0
	LLDPPortIDSubtypeIfaceAlias     LLDPPortIDSubType = 1
	LLDPPortIDSubtypePortComp       LLDPPortIDSubType = 2
	LLDPPortIDSubtypeMACAddr        LLDPPortIDSubType = 3
	LLDPPortIDSubtypeNetworkAddr    LLDPPortIDSubType = 4
	LLDPPortIDSubtypeIfaceName      LLDPPortIDSubType = 5
	LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6
	LLDPPortIDSubtypeLocal          LLDPPortIDSubType = 7
)

// LLDPPortID is the mandatory Port ID TLV: a subtype tag plus the raw
// identifier bytes it qualifies.
type LLDPPortID struct {
	Subtype LLDPPortIDSubType
	ID      []byte
}

// serialize renders the TLV in wire format: a 16-bit header with the TLV
// type in the top 7 bits and the length in the low 9 bits, followed by
// the subtype byte and the ID bytes.
func (c *LLDPPortID) serialize() []byte {

	var buf = make([]byte, c.serializedLen())
	idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) //id should take 7 bits, length should take 9 bits, +1 for subtype
	binary.BigEndian.PutUint16(buf[0:2], idLen)
	buf[2] = byte(c.Subtype)
	copy(buf[3:], c.ID)
	return buf
}

// serializedLen reports the on-wire size of the TLV in bytes.
func (c *LLDPPortID) serializedLen() int {
	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
}
+
// LinkLayerDiscovery is a packet layer containing the LinkLayer Discovery Protocol.
// See http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf
// ChassisID, PortID and TTL are mandatory TLV's. Other values can be decoded
// with DecodeValues()
type LinkLayerDiscovery struct {
	BaseLayer
	ChassisID LLDPChassisID             // mandatory Chassis ID TLV
	PortID    LLDPPortID                // mandatory Port ID TLV
	TTL       uint16                    // mandatory TTL TLV value
	Values    []LinkLayerDiscoveryValue // all remaining (optional) TLVs, undecoded
}
+
+type IEEEOUI uint32
+
+// http://standards.ieee.org/develop/regauth/oui/oui.txt
+const (
+ IEEEOUI8021 IEEEOUI = 0x0080c2
+ IEEEOUI8023 IEEEOUI = 0x00120f
+ IEEEOUI80211 IEEEOUI = 0x000fac
+ IEEEOUI8021Qbg IEEEOUI = 0x0013BF
+ IEEEOUICisco2 IEEEOUI = 0x000142
+ IEEEOUIMedia IEEEOUI = 0x0012bb // TR-41
+ IEEEOUIProfinet IEEEOUI = 0x000ecf
+ IEEEOUIDCBX IEEEOUI = 0x001b21
+)
+
+// LLDPOrgSpecificTLV is an Organisation-specific TLV
+type LLDPOrgSpecificTLV struct {
+ OUI IEEEOUI
+ SubType uint8
+ Info []byte
+}
+
+// LLDPCapabilities Types
+const (
+ LLDPCapsOther uint16 = 1 << 0
+ LLDPCapsRepeater uint16 = 1 << 1
+ LLDPCapsBridge uint16 = 1 << 2
+ LLDPCapsWLANAP uint16 = 1 << 3
+ LLDPCapsRouter uint16 = 1 << 4
+ LLDPCapsPhone uint16 = 1 << 5
+ LLDPCapsDocSis uint16 = 1 << 6
+ LLDPCapsStationOnly uint16 = 1 << 7
+ LLDPCapsCVLAN uint16 = 1 << 8
+ LLDPCapsSVLAN uint16 = 1 << 9
+ LLDPCapsTmpr uint16 = 1 << 10
+)
+
+// LLDPCapabilities represents the capabilities of a device
+type LLDPCapabilities struct {
+ Other bool
+ Repeater bool
+ Bridge bool
+ WLANAP bool
+ Router bool
+ Phone bool
+ DocSis bool
+ StationOnly bool
+ CVLAN bool
+ SVLAN bool
+ TMPR bool
+}
+
+type LLDPSysCapabilities struct {
+ SystemCap LLDPCapabilities
+ EnabledCap LLDPCapabilities
+}
+
+type IANAAddressFamily byte
+
+// LLDP Management Address Subtypes
+// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml
+const (
+ IANAAddressFamilyReserved IANAAddressFamily = 0
+ IANAAddressFamilyIPV4 IANAAddressFamily = 1
+ IANAAddressFamilyIPV6 IANAAddressFamily = 2
+ IANAAddressFamilyNSAP IANAAddressFamily = 3
+ IANAAddressFamilyHDLC IANAAddressFamily = 4
+ IANAAddressFamilyBBN1822 IANAAddressFamily = 5
+ IANAAddressFamily802 IANAAddressFamily = 6
+ IANAAddressFamilyE163 IANAAddressFamily = 7
+ IANAAddressFamilyE164 IANAAddressFamily = 8
+ IANAAddressFamilyF69 IANAAddressFamily = 9
+ IANAAddressFamilyX121 IANAAddressFamily = 10
+ IANAAddressFamilyIPX IANAAddressFamily = 11
+ IANAAddressFamilyAtalk IANAAddressFamily = 12
+ IANAAddressFamilyDecnet IANAAddressFamily = 13
+ IANAAddressFamilyBanyan IANAAddressFamily = 14
+ IANAAddressFamilyE164NSAP IANAAddressFamily = 15
+ IANAAddressFamilyDNS IANAAddressFamily = 16
+ IANAAddressFamilyDistname IANAAddressFamily = 17
+ IANAAddressFamilyASNumber IANAAddressFamily = 18
+ IANAAddressFamilyXTPIPV4 IANAAddressFamily = 19
+ IANAAddressFamilyXTPIPV6 IANAAddressFamily = 20
+ IANAAddressFamilyXTP IANAAddressFamily = 21
+ IANAAddressFamilyFcWWPN IANAAddressFamily = 22
+ IANAAddressFamilyFcWWNN IANAAddressFamily = 23
+ IANAAddressFamilyGWID IANAAddressFamily = 24
+ IANAAddressFamilyL2VPN IANAAddressFamily = 25
+)
+
+type LLDPInterfaceSubtype byte
+
+// LLDP Interface Subtypes
+const (
+ LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1
+ LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2
+ LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3
+)
+
+type LLDPMgmtAddress struct {
+ Subtype IANAAddressFamily
+ Address []byte
+ InterfaceSubtype LLDPInterfaceSubtype
+ InterfaceNumber uint32
+ OID string
+}
+
+// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues
+// Organisation-specific TLV's can be decoded using the various Decode() methods
+type LinkLayerDiscoveryInfo struct {
+ BaseLayer
+ PortDescription string
+ SysName string
+ SysDescription string
+ SysCapabilities LLDPSysCapabilities
+ MgmtAddress LLDPMgmtAddress
+ OrgTLVs []LLDPOrgSpecificTLV // Private TLVs
+ Unknown []LinkLayerDiscoveryValue // undecoded TLVs
+}
+
+/// IEEE 802.1 TLV Subtypes
+const (
+ LLDP8021SubtypePortVLANID uint8 = 1
+ LLDP8021SubtypeProtocolVLANID uint8 = 2
+ LLDP8021SubtypeVLANName uint8 = 3
+ LLDP8021SubtypeProtocolIdentity uint8 = 4
+ LLDP8021SubtypeVDIUsageDigest uint8 = 5
+ LLDP8021SubtypeManagementVID uint8 = 6
+ LLDP8021SubtypeLinkAggregation uint8 = 7
+)
+
+// VLAN Port Protocol ID options
+const (
+ LLDPProtocolVLANIDCapability byte = 1 << 1
+ LLDPProtocolVLANIDStatus byte = 1 << 2
+)
+
+type PortProtocolVLANID struct {
+ Supported bool
+ Enabled bool
+ ID uint16
+}
+
+type VLANName struct {
+ ID uint16
+ Name string
+}
+
+type ProtocolIdentity []byte
+
+// LACP options
+const (
+ LLDPAggregationCapability byte = 1 << 0
+ LLDPAggregationStatus byte = 1 << 1
+)
+
+// IEEE 802 Link Aggregation parameters
+type LLDPLinkAggregation struct {
+ Supported bool
+ Enabled bool
+ PortID uint32
+}
+
+// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs
+type LLDPInfo8021 struct {
+ PVID uint16
+ PPVIDs []PortProtocolVLANID
+ VLANNames []VLANName
+ ProtocolIdentities []ProtocolIdentity
+ VIDUsageDigest uint32
+ ManagementVID uint16
+ LinkAggregation LLDPLinkAggregation
+}
+
+// IEEE 802.3 TLV Subtypes
+const (
+ LLDP8023SubtypeMACPHY uint8 = 1
+ LLDP8023SubtypeMDIPower uint8 = 2
+ LLDP8023SubtypeLinkAggregation uint8 = 3
+ LLDP8023SubtypeMTU uint8 = 4
+)
+
+// MACPHY options
+const (
+ LLDPMACPHYCapability byte = 1 << 0
+ LLDPMACPHYStatus byte = 1 << 1
+)
+
+// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType
+const (
+ LLDPMAUTypeUnknown uint16 = 0
+ LLDPMAUTypeAUI uint16 = 1
+ LLDPMAUType10Base5 uint16 = 2
+ LLDPMAUTypeFOIRL uint16 = 3
+ LLDPMAUType10Base2 uint16 = 4
+ LLDPMAUType10BaseT uint16 = 5
+ LLDPMAUType10BaseFP uint16 = 6
+ LLDPMAUType10BaseFB uint16 = 7
+ LLDPMAUType10BaseFL uint16 = 8
+ LLDPMAUType10BROAD36 uint16 = 9
+ LLDPMAUType10BaseT_HD uint16 = 10
+ LLDPMAUType10BaseT_FD uint16 = 11
+ LLDPMAUType10BaseFL_HD uint16 = 12
+ LLDPMAUType10BaseFL_FD uint16 = 13
+ LLDPMAUType100BaseT4 uint16 = 14
+ LLDPMAUType100BaseTX_HD uint16 = 15
+ LLDPMAUType100BaseTX_FD uint16 = 16
+ LLDPMAUType100BaseFX_HD uint16 = 17
+ LLDPMAUType100BaseFX_FD uint16 = 18
+ LLDPMAUType100BaseT2_HD uint16 = 19
+ LLDPMAUType100BaseT2_FD uint16 = 20
+ LLDPMAUType1000BaseX_HD uint16 = 21
+ LLDPMAUType1000BaseX_FD uint16 = 22
+ LLDPMAUType1000BaseLX_HD uint16 = 23
+ LLDPMAUType1000BaseLX_FD uint16 = 24
+ LLDPMAUType1000BaseSX_HD uint16 = 25
+ LLDPMAUType1000BaseSX_FD uint16 = 26
+ LLDPMAUType1000BaseCX_HD uint16 = 27
+ LLDPMAUType1000BaseCX_FD uint16 = 28
+ LLDPMAUType1000BaseT_HD uint16 = 29
+ LLDPMAUType1000BaseT_FD uint16 = 30
+ LLDPMAUType10GBaseX uint16 = 31
+ LLDPMAUType10GBaseLX4 uint16 = 32
+ LLDPMAUType10GBaseR uint16 = 33
+ LLDPMAUType10GBaseER uint16 = 34
+ LLDPMAUType10GBaseLR uint16 = 35
+ LLDPMAUType10GBaseSR uint16 = 36
+ LLDPMAUType10GBaseW uint16 = 37
+ LLDPMAUType10GBaseEW uint16 = 38
+ LLDPMAUType10GBaseLW uint16 = 39
+ LLDPMAUType10GBaseSW uint16 = 40
+ LLDPMAUType10GBaseCX4 uint16 = 41
+ LLDPMAUType2BaseTL uint16 = 42
+ LLDPMAUType10PASS_TS uint16 = 43
+ LLDPMAUType100BaseBX10D uint16 = 44
+ LLDPMAUType100BaseBX10U uint16 = 45
+ LLDPMAUType100BaseLX10 uint16 = 46
+ LLDPMAUType1000BaseBX10D uint16 = 47
+ LLDPMAUType1000BaseBX10U uint16 = 48
+ LLDPMAUType1000BaseLX10 uint16 = 49
+ LLDPMAUType1000BasePX10D uint16 = 50
+ LLDPMAUType1000BasePX10U uint16 = 51
+ LLDPMAUType1000BasePX20D uint16 = 52
+ LLDPMAUType1000BasePX20U uint16 = 53
+ LLDPMAUType10GBaseT uint16 = 54
+ LLDPMAUType10GBaseLRM uint16 = 55
+ LLDPMAUType1000BaseKX uint16 = 56
+ LLDPMAUType10GBaseKX4 uint16 = 57
+ LLDPMAUType10GBaseKR uint16 = 58
+ LLDPMAUType10_1GBasePRX_D1 uint16 = 59
+ LLDPMAUType10_1GBasePRX_D2 uint16 = 60
+ LLDPMAUType10_1GBasePRX_D3 uint16 = 61
+ LLDPMAUType10_1GBasePRX_U1 uint16 = 62
+ LLDPMAUType10_1GBasePRX_U2 uint16 = 63
+ LLDPMAUType10_1GBasePRX_U3 uint16 = 64
+ LLDPMAUType10GBasePR_D1 uint16 = 65
+ LLDPMAUType10GBasePR_D2 uint16 = 66
+ LLDPMAUType10GBasePR_D3 uint16 = 67
+ LLDPMAUType10GBasePR_U1 uint16 = 68
+ LLDPMAUType10GBasePR_U3 uint16 = 69
+)
+
+// From RFC 3636 - ifMauAutoNegCapAdvertisedBits
+const (
+ LLDPMAUPMDOther uint16 = 1 << 15
+ LLDPMAUPMD10BaseT uint16 = 1 << 14
+ LLDPMAUPMD10BaseT_FD uint16 = 1 << 13
+ LLDPMAUPMD100BaseT4 uint16 = 1 << 12
+ LLDPMAUPMD100BaseTX uint16 = 1 << 11
+ LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10
+ LLDPMAUPMD100BaseT2 uint16 = 1 << 9
+ LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8
+ LLDPMAUPMDFDXPAUSE uint16 = 1 << 7
+ LLDPMAUPMDFDXAPAUSE uint16 = 1 << 6
+ LLDPMAUPMDFDXSPAUSE uint16 = 1 << 5
+ LLDPMAUPMDFDXBPAUSE uint16 = 1 << 4
+ LLDPMAUPMD1000BaseX uint16 = 1 << 3
+ LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2
+ LLDPMAUPMD1000BaseT uint16 = 1 << 1
+ LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0
+)
+
+// Inverted ifMauAutoNegCapAdvertisedBits if required
+// (Some manufacturers misinterpreted the spec -
+// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455)
+const (
+ LLDPMAUPMDOtherInv uint16 = 1 << 0
+ LLDPMAUPMD10BaseTInv uint16 = 1 << 1
+ LLDPMAUPMD10BaseT_FDInv uint16 = 1 << 2
+ LLDPMAUPMD100BaseT4Inv uint16 = 1 << 3
+ LLDPMAUPMD100BaseTXInv uint16 = 1 << 4
+ LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5
+ LLDPMAUPMD100BaseT2Inv uint16 = 1 << 6
+ LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7
+ LLDPMAUPMDFDXPAUSEInv uint16 = 1 << 8
+ LLDPMAUPMDFDXAPAUSEInv uint16 = 1 << 9
+ LLDPMAUPMDFDXSPAUSEInv uint16 = 1 << 10
+ LLDPMAUPMDFDXBPAUSEInv uint16 = 1 << 11
+ LLDPMAUPMD1000BaseXInv uint16 = 1 << 12
+ LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13
+ LLDPMAUPMD1000BaseTInv uint16 = 1 << 14
+ LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15
+)
+
+type LLDPMACPHYConfigStatus struct {
+ AutoNegSupported bool
+ AutoNegEnabled bool
+ AutoNegCapability uint16
+ MAUType uint16
+}
+
+// MDI Power options
+const (
+ LLDPMDIPowerPortClass byte = 1 << 0
+ LLDPMDIPowerCapability byte = 1 << 1
+ LLDPMDIPowerStatus byte = 1 << 2
+ LLDPMDIPowerPairsAbility byte = 1 << 3
+)
+
+type LLDPPowerType byte
+
+type LLDPPowerSource byte
+
+type LLDPPowerPriority byte
+
+const (
+ LLDPPowerPriorityUnknown LLDPPowerPriority = 0
+ LLDPPowerPriorityMedium LLDPPowerPriority = 1
+ LLDPPowerPriorityHigh LLDPPowerPriority = 2
+ LLDPPowerPriorityLow LLDPPowerPriority = 3
+)
+
+type LLDPPowerViaMDI8023 struct {
+ PortClassPSE bool // false = PD
+ PSESupported bool
+ PSEEnabled bool
+ PSEPairsAbility bool
+ PSEPowerPair uint8
+ PSEClass uint8
+ Type LLDPPowerType
+ Source LLDPPowerSource
+ Priority LLDPPowerPriority
+ Requested uint16 // 1-510 Watts
+ Allocated uint16 // 1-510 Watts
+}
+
+// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs
+type LLDPInfo8023 struct {
+ MACPHYConfigStatus LLDPMACPHYConfigStatus
+ PowerViaMDI LLDPPowerViaMDI8023
+ LinkAggregation LLDPLinkAggregation
+ MTU uint16
+}
+
+// IEEE 802.1Qbg TLV Subtypes
+const (
+ LLDP8021QbgEVB uint8 = 0
+ LLDP8021QbgCDCP uint8 = 1
+ LLDP8021QbgVDP uint8 = 2
+ LLDP8021QbgEVB22 uint8 = 13
+)
+
+// LLDPEVBCapabilities Types
+const (
+ LLDPEVBCapsSTD uint16 = 1 << 7
+ LLDPEVBCapsRR uint16 = 1 << 6
+ LLDPEVBCapsRTE uint16 = 1 << 2
+ LLDPEVBCapsECP uint16 = 1 << 1
+ LLDPEVBCapsVDP uint16 = 1 << 0
+)
+
+// LLDPEVBCapabilities represents the EVB capabilities of a device
+type LLDPEVBCapabilities struct {
+ StandardBridging bool
+ ReflectiveRelay bool
+ RetransmissionTimerExponent bool
+ EdgeControlProtocol bool
+ VSIDiscoveryProtocol bool
+}
+
+type LLDPEVBSettings struct {
+ Supported LLDPEVBCapabilities
+ Enabled LLDPEVBCapabilities
+ SupportedVSIs uint16
+ ConfiguredVSIs uint16
+ RTEExponent uint8
+}
+
+// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs
+type LLDPInfo8021Qbg struct {
+ EVBSettings LLDPEVBSettings
+}
+
+type LLDPMediaSubtype uint8
+
+// Media TLV Subtypes
+const (
+ LLDPMediaTypeCapabilities LLDPMediaSubtype = 1
+ LLDPMediaTypeNetwork LLDPMediaSubtype = 2
+ LLDPMediaTypeLocation LLDPMediaSubtype = 3
+ LLDPMediaTypePower LLDPMediaSubtype = 4
+ LLDPMediaTypeHardware LLDPMediaSubtype = 5
+ LLDPMediaTypeFirmware LLDPMediaSubtype = 6
+ LLDPMediaTypeSoftware LLDPMediaSubtype = 7
+ LLDPMediaTypeSerial LLDPMediaSubtype = 8
+ LLDPMediaTypeManufacturer LLDPMediaSubtype = 9
+ LLDPMediaTypeModel LLDPMediaSubtype = 10
+ LLDPMediaTypeAssetID LLDPMediaSubtype = 11
+)
+
+type LLDPMediaClass uint8
+
+// Media Class Values
+const (
+ LLDPMediaClassUndefined LLDPMediaClass = 0
+ LLDPMediaClassEndpointI LLDPMediaClass = 1
+ LLDPMediaClassEndpointII LLDPMediaClass = 2
+ LLDPMediaClassEndpointIII LLDPMediaClass = 3
+ LLDPMediaClassNetwork LLDPMediaClass = 4
+)
+
+// LLDPMediaCapabilities Types
+const (
+ LLDPMediaCapsLLDP uint16 = 1 << 0
+ LLDPMediaCapsNetwork uint16 = 1 << 1
+ LLDPMediaCapsLocation uint16 = 1 << 2
+ LLDPMediaCapsPowerPSE uint16 = 1 << 3
+ LLDPMediaCapsPowerPD uint16 = 1 << 4
+ LLDPMediaCapsInventory uint16 = 1 << 5
+)
+
+// LLDPMediaCapabilities represents the LLDP Media capabilities of a device
+type LLDPMediaCapabilities struct {
+ Capabilities bool
+ NetworkPolicy bool
+ Location bool
+ PowerPSE bool
+ PowerPD bool
+ Inventory bool
+ Class LLDPMediaClass
+}
+
+type LLDPApplicationType uint8
+
+const (
+ LLDPAppTypeReserved LLDPApplicationType = 0
+ LLDPAppTypeVoice LLDPApplicationType = 1
+ LLDPappTypeVoiceSignaling LLDPApplicationType = 2
+ LLDPappTypeGuestVoice LLDPApplicationType = 3
+ LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4
+ LLDPappTypeSoftphoneVoice LLDPApplicationType = 5
+ LLDPappTypeVideoConferencing LLDPApplicationType = 6
+ LLDPappTypeStreamingVideo LLDPApplicationType = 7
+ LLDPappTypeVideoSignaling LLDPApplicationType = 8
+)
+
+type LLDPNetworkPolicy struct {
+ ApplicationType LLDPApplicationType
+ Defined bool
+ Tagged bool
+ VLANId uint16
+ L2Priority uint16
+ DSCPValue uint8
+}
+
+type LLDPLocationFormat uint8
+
+const (
+ LLDPLocationFormatInvalid LLDPLocationFormat = 0
+ LLDPLocationFormatCoordinate LLDPLocationFormat = 1
+ LLDPLocationFormatAddress LLDPLocationFormat = 2
+ LLDPLocationFormatECS LLDPLocationFormat = 3
+)
+
+type LLDPLocationAddressWhat uint8
+
+const (
+ LLDPLocationAddressWhatDHCP LLDPLocationAddressWhat = 0
+ LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1
+ LLDPLocationAddressWhatClient LLDPLocationAddressWhat = 2
+)
+
+type LLDPLocationAddressType uint8
+
+const (
+ LLDPLocationAddressTypeLanguage LLDPLocationAddressType = 0
+ LLDPLocationAddressTypeNational LLDPLocationAddressType = 1
+ LLDPLocationAddressTypeCounty LLDPLocationAddressType = 2
+ LLDPLocationAddressTypeCity LLDPLocationAddressType = 3
+ LLDPLocationAddressTypeCityDivision LLDPLocationAddressType = 4
+ LLDPLocationAddressTypeNeighborhood LLDPLocationAddressType = 5
+ LLDPLocationAddressTypeStreet LLDPLocationAddressType = 6
+ LLDPLocationAddressTypeLeadingStreet LLDPLocationAddressType = 16
+ LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17
+ LLDPLocationAddressTypeStreetSuffix LLDPLocationAddressType = 18
+ LLDPLocationAddressTypeHouseNum LLDPLocationAddressType = 19
+ LLDPLocationAddressTypeHouseSuffix LLDPLocationAddressType = 20
+ LLDPLocationAddressTypeLandmark LLDPLocationAddressType = 21
+ LLDPLocationAddressTypeAdditional LLDPLocationAddressType = 22
+ LLDPLocationAddressTypeName LLDPLocationAddressType = 23
+ LLDPLocationAddressTypePostal LLDPLocationAddressType = 24
+ LLDPLocationAddressTypeBuilding LLDPLocationAddressType = 25
+ LLDPLocationAddressTypeUnit LLDPLocationAddressType = 26
+ LLDPLocationAddressTypeFloor LLDPLocationAddressType = 27
+ LLDPLocationAddressTypeRoom LLDPLocationAddressType = 28
+ LLDPLocationAddressTypePlace LLDPLocationAddressType = 29
+ LLDPLocationAddressTypeScript LLDPLocationAddressType = 128
+)
+
+type LLDPLocationCoordinate struct {
+ LatitudeResolution uint8
+ Latitude uint64
+ LongitudeResolution uint8
+ Longitude uint64
+ AltitudeType uint8
+ AltitudeResolution uint16
+ Altitude uint32
+ Datum uint8
+}
+
+type LLDPLocationAddressLine struct {
+ Type LLDPLocationAddressType
+ Value string
+}
+
+type LLDPLocationAddress struct {
+ What LLDPLocationAddressWhat
+ CountryCode string
+ AddressLines []LLDPLocationAddressLine
+}
+
+type LLDPLocationECS struct {
+ ELIN string
+}
+
+// LLDP represents a physical location.
+// Only one of the embedded types will contain values, depending on Format.
+type LLDPLocation struct {
+ Format LLDPLocationFormat
+ Coordinate LLDPLocationCoordinate
+ Address LLDPLocationAddress
+ ECS LLDPLocationECS
+}
+
+type LLDPPowerViaMDI struct {
+ Type LLDPPowerType
+ Source LLDPPowerSource
+ Priority LLDPPowerPriority
+ Value uint16
+}
+
+// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs
+type LLDPInfoMedia struct {
+ MediaCapabilities LLDPMediaCapabilities
+ NetworkPolicy LLDPNetworkPolicy
+ Location LLDPLocation
+ PowerViaMDI LLDPPowerViaMDI
+ HardwareRevision string
+ FirmwareRevision string
+ SoftwareRevision string
+ SerialNumber string
+ Manufacturer string
+ Model string
+ AssetID string
+}
+
+type LLDPCisco2Subtype uint8
+
+// Cisco2 TLV Subtypes
+const (
+ LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1
+)
+
+const (
+ LLDPCiscoPSESupport uint8 = 1 << 0
+ LLDPCiscoArchShared uint8 = 1 << 1
+ LLDPCiscoPDSparePair uint8 = 1 << 2
+ LLDPCiscoPSESparePair uint8 = 1 << 3
+)
+
+// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs
+type LLDPInfoCisco2 struct {
+ PSEFourWirePoESupported bool
+ PDSparePairArchitectureShared bool
+ PDRequestSparePairPoEOn bool
+ PSESparePairPoEOn bool
+}
+
+// Profinet Subtypes
+type LLDPProfinetSubtype uint8
+
+const (
+ LLDPProfinetPNIODelay LLDPProfinetSubtype = 1
+ LLDPProfinetPNIOPortStatus LLDPProfinetSubtype = 2
+ LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4
+ LLDPProfinetPNIOChassisMAC LLDPProfinetSubtype = 5
+ LLDPProfinetPNIOPTCPStatus LLDPProfinetSubtype = 6
+)
+
+type LLDPPNIODelay struct {
+ RXLocal uint32
+ RXRemote uint32
+ TXLocal uint32
+ TXRemote uint32
+ CableLocal uint32
+}
+
+type LLDPPNIOPortStatus struct {
+ Class2 uint16
+ Class3 uint16
+}
+
+type LLDPPNIOMRPPortStatus struct {
+ UUID []byte
+ Status uint16
+}
+
+type LLDPPNIOPTCPStatus struct {
+ MasterAddress []byte
+ SubdomainUUID []byte
+ IRDataUUID []byte
+ PeriodValid bool
+ PeriodLength uint32
+ RedPeriodValid bool
+ RedPeriodBegin uint32
+ OrangePeriodValid bool
+ OrangePeriodBegin uint32
+ GreenPeriodValid bool
+ GreenPeriodBegin uint32
+}
+
+// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs
+type LLDPInfoProfinet struct {
+ PNIODelay LLDPPNIODelay
+ PNIOPortStatus LLDPPNIOPortStatus
+ PNIOMRPPortStatus LLDPPNIOMRPPortStatus
+ ChassisMAC []byte
+ PNIOPTCPStatus LLDPPNIOPTCPStatus
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscovery.
+func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType {
+ return LayerTypeLinkLayerDiscovery
+}
+
+// SerializeTo serializes LLDP packet to bytes and writes on SerializeBuffer.
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ chassIDLen := c.ChassisID.serializedLen()
+ portIDLen := c.PortID.serializedLen()
+ vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL
+ if err != nil {
+ return err
+ }
+ copy(vb[:chassIDLen], c.ChassisID.serialize())
+ copy(vb[chassIDLen:], c.PortID.serialize())
+ ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2)
+ binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen)
+ binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL)
+
+ vb, err = b.AppendBytes(2) // End Tlv, 2 bytes
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(vb[len(vb)-2:], uint16(0)) //End tlv, 2 bytes, all zero
+ return nil
+
+}
+
+// decodeLinkLayerDiscovery parses an LLDPDU: a stream of TLVs whose header
+// packs a 7-bit type and a 9-bit length into two bytes.  The mandatory
+// Chassis ID, Port ID and TTL TLVs populate a LinkLayerDiscovery layer; all
+// remaining TLVs are kept raw in Values and pre-decoded into a
+// LinkLayerDiscoveryInfo layer.
+func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	var vals []LinkLayerDiscoveryValue
+	vData := data[0:]
+	for len(vData) > 0 {
+		// Every TLV needs at least the 2-byte type/length header.
+		if len(vData) < 2 {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		nbit := vData[0] & 0x01
+		t := LLDPTLVType(vData[0] >> 1)
+		val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])}
+		// Validate the advertised length BEFORE slicing the value out;
+		// the original sliced first and could panic on truncated input.
+		if len(vData) < int(2+val.Length) {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		if val.Length > 0 {
+			val.Value = vData[2 : val.Length+2]
+		}
+		vals = append(vals, val)
+		if t == LLDPTLVEnd {
+			break
+		}
+		vData = vData[2+val.Length:]
+	}
+	// Chassis ID, Port ID, TTL and End are all mandatory.
+	if len(vals) < 4 {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c := &LinkLayerDiscovery{}
+	gotEnd := false
+	for _, v := range vals {
+		switch v.Type {
+		case LLDPTLVEnd:
+			gotEnd = true
+		case LLDPTLVChassisID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery ChassisID TLV")
+			}
+			c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0])
+			c.ChassisID.ID = v.Value[1:]
+		case LLDPTLVPortID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery PortID TLV")
+			}
+			c.PortID.Subtype = LLDPPortIDSubType(v.Value[0])
+			c.PortID.ID = v.Value[1:]
+		case LLDPTLVTTL:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery TTL TLV")
+			}
+			c.TTL = binary.BigEndian.Uint16(v.Value[0:2])
+		default:
+			c.Values = append(c.Values, v)
+		}
+	}
+	if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c.Contents = data
+	p.AddLayer(c)
+
+	info := &LinkLayerDiscoveryInfo{}
+	p.AddLayer(info)
+	for _, v := range c.Values {
+		switch v.Type {
+		case LLDPTLVPortDescription:
+			info.PortDescription = string(v.Value)
+		case LLDPTLVSysName:
+			info.SysName = string(v.Value)
+		case LLDPTLVSysDescription:
+			info.SysDescription = string(v.Value)
+		case LLDPTLVSysCapabilities:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2]))
+			info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4]))
+		case LLDPTLVMgmtAddress:
+			if err := checkLLDPTLVLen(v, 9); err != nil {
+				return err
+			}
+			// Layout (IEEE 802.1AB, management address TLV): address
+			// string length (1 byte, counts subtype+address), address
+			// subtype (1), address, interface numbering subtype (1),
+			// interface number (4), OID string length (1), OID.
+			// mlen is widened to int up front: the original computed
+			// mlen+7 etc. in uint8, which wraps for large mlen.
+			mlen := int(v.Value[0])
+			if err := checkLLDPTLVLen(v, mlen+7); err != nil {
+				return err
+			}
+			info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1])
+			info.MgmtAddress.Address = v.Value[2 : mlen+1]
+			info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1])
+			info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6])
+			olen := int(v.Value[mlen+6])
+			// The OID begins immediately after its length byte at
+			// mlen+7.  The original read at mlen+9 while only
+			// length-checking mlen+6+olen, reading past the validated
+			// bound and skipping the first two OID bytes.
+			if err := checkLLDPTLVLen(v, mlen+7+olen); err != nil {
+				return err
+			}
+			info.MgmtAddress.OID = string(v.Value[mlen+7 : mlen+7+olen])
+		case LLDPTLVOrgSpecific:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{IEEEOUI(binary.BigEndian.Uint32(append([]byte{byte(0)}, v.Value[0:3]...))), uint8(v.Value[3]), v.Value[4:]})
+		}
+	}
+	return nil
+}
+
+// Decode8021 decodes the IEEE 802.1 organizationally-specific TLVs carried
+// in this LLDPDU into an LLDPInfo8021.  TLVs with other OUIs are skipped;
+// the first failing length check aborts decoding and is returned.
+func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021SubtypePortVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.PVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeProtocolVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0)
+			en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0)
+			id := binary.BigEndian.Uint16(o.Info[1:3])
+			info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id})
+		case LLDP8021SubtypeVLANName:
+			// Layout: VID (2 bytes) + name length (1 byte) + name.
+			// Require at least 3 bytes so the o.Info[3:] slice below
+			// cannot go out of range (the original only required 2).
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			id := binary.BigEndian.Uint16(o.Info[0:2])
+			info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])})
+		case LLDP8021SubtypeProtocolIdentity:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			// The first byte announces the identity length; bound it by
+			// the TLV payload so a bogus length byte cannot over-read.
+			// (Also renamed from "l", which shadowed the receiver.)
+			plen := int(o.Info[0])
+			if plen > 0 && 1+plen <= len(o.Info) {
+				info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+plen])
+			}
+		case LLDP8021SubtypeVDIUsageDigest:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4])
+		case LLDP8021SubtypeManagementVID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		}
+	}
+	return
+}
+
+// Decode8023 decodes the IEEE 802.3 organizationally-specific TLVs carried
+// in this LLDPDU into an LLDPInfo8023.  TLVs with other OUIs are skipped;
+// the first failing length check aborts decoding and is returned.
+func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8023 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8023SubtypeMACPHY:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPMACPHYCapability > 0)
+			en := (o.Info[0]&LLDPMACPHYStatus > 0)
+			ca := binary.BigEndian.Uint16(o.Info[1:3])
+			mau := binary.BigEndian.Uint16(o.Info[3:5])
+			info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau}
+		case LLDP8023SubtypeMDIPower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0)
+			info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0)
+			info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0)
+			info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0)
+			info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1])
+			info.PowerViaMDI.PSEClass = uint8(o.Info[2])
+			// Optional extension fields: type/source/priority plus the
+			// requested and allocated power.  Allocated is read from
+			// o.Info[6:8], so 8 bytes are required — the original
+			// tested >= 7 and panicked on a 7-byte TLV.
+			if len(o.Info) >= 8 {
+				info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6)
+				info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4)
+				if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+					info.PowerViaMDI.Source += 128 // For Stringify purposes
+				}
+				info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f)
+				info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6])
+				info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8])
+			}
+		case LLDP8023SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		case LLDP8023SubtypeMTU:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.MTU = binary.BigEndian.Uint16(o.Info[0:2])
+		}
+	}
+	return
+}
+
+// Decode8021Qbg extracts the IEEE 802.1Qbg EVB settings advertised in this
+// LLDPDU.  Only the EVB subtype is recognized; TLVs with other OUIs or
+// subtypes are ignored.
+func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021Qbg || o.SubType != LLDP8021QbgEVB {
+			continue
+		}
+		if err = checkLLDPOrgSpecificLen(o, 9); err != nil {
+			return
+		}
+		// Two 16-bit capability bitmaps, two 16-bit VSI counters and the
+		// retransmission-timer exponent byte.
+		raw := o.Info
+		info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(raw[0:2]))
+		info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(raw[2:4]))
+		info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(raw[4:6])
+		info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(raw[6:8])
+		info.EVBSettings.RTEExponent = raw[8]
+	}
+	return
+}
+
+// DecodeMedia decodes the TIA LLDP-MED (media endpoint discovery)
+// organizationally-specific TLVs in this LLDPDU into an LLDPInfoMedia.
+// TLVs with other OUIs and unknown media subtypes are ignored; the first
+// failing length check aborts decoding and is returned.
+func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIMedia {
+			continue
+		}
+		switch LLDPMediaSubtype(o.SubType) {
+		case LLDPMediaTypeCapabilities:
+			// 16-bit capability bitmap followed by the device class byte.
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			b := binary.BigEndian.Uint16(o.Info[0:2])
+			info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0
+			info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0
+			info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0
+			info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0
+			info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0
+			info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0
+			info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2])
+		case LLDPMediaTypeNetwork:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0])
+			// Flags and VLAN id are packed into the next 16 bits; the
+			// high bit is the "unknown policy" flag, hence Defined is
+			// true when it is clear.
+			b := binary.BigEndian.Uint16(o.Info[1:3])
+			info.NetworkPolicy.Defined = (b & 0x8000) == 0
+			info.NetworkPolicy.Tagged = (b & 0x4000) > 0
+			info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1
+			b = binary.BigEndian.Uint16(o.Info[2:4])
+			info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6
+			info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f)
+		case LLDPMediaTypeLocation:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			info.Location.Format = LLDPLocationFormat(o.Info[0])
+			// Strip the format byte; the per-format parsers below index
+			// from the start of the location payload.
+			o.Info = o.Info[1:]
+			switch info.Location.Format {
+			case LLDPLocationFormatCoordinate:
+				// Bit-packed geo coordinates: 6-bit resolutions with
+				// 34-bit fixed-point latitude/longitude, then altitude
+				// type/resolution/value and the datum byte.  The byte
+				// windows below deliberately overlap to extract
+				// non-byte-aligned fields.
+				if err = checkLLDPOrgSpecificLen(o, 16); err != nil {
+					return
+				}
+				info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2
+				b := binary.BigEndian.Uint64(o.Info[0:8])
+				info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2
+				b = binary.BigEndian.Uint64(o.Info[5:13])
+				info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0x30) >> 4)
+				b1 := binary.BigEndian.Uint16(o.Info[10:12])
+				info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6
+				b2 := binary.BigEndian.Uint32(o.Info[11:15])
+				info.Location.Coordinate.Altitude = b2 & 0x3fffffff
+				info.Location.Coordinate.Datum = uint8(o.Info[15])
+			case LLDPLocationFormatAddress:
+				// NOTE(review): only 3 bytes are required here, but
+				// o.Info[2:4] and o.Info[4:] below need at least 4 —
+				// confirm whether the minimum should be raised.
+				if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+					return
+				}
+				//ll := uint8(o.Info[0])
+				info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1])
+				info.Location.Address.CountryCode = string(o.Info[2:4])
+				// Remaining bytes are a list of (type, length, value)
+				// civic-address elements; stop at the first element
+				// whose declared length overruns the data.
+				data := o.Info[4:]
+				for len(data) > 1 {
+					aType := LLDPLocationAddressType(data[0])
+					aLen := int(data[1])
+					if len(data) >= aLen+2 {
+						info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])})
+						data = data[aLen+2:]
+					} else {
+						break
+					}
+				}
+			case LLDPLocationFormatECS:
+				info.Location.ECS.ELIN = string(o.Info)
+			}
+		case LLDPMediaTypePower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6)
+			info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4)
+			if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+				info.PowerViaMDI.Source += 128 // For Stringify purposes
+			}
+			info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f)
+			info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // 0 to 102.3 w, 0.1W increments
+		case LLDPMediaTypeHardware:
+			info.HardwareRevision = string(o.Info)
+		case LLDPMediaTypeFirmware:
+			info.FirmwareRevision = string(o.Info)
+		case LLDPMediaTypeSoftware:
+			info.SoftwareRevision = string(o.Info)
+		case LLDPMediaTypeSerial:
+			info.SerialNumber = string(o.Info)
+		case LLDPMediaTypeManufacturer:
+			info.Manufacturer = string(o.Info)
+		case LLDPMediaTypeModel:
+			info.Model = string(o.Info)
+		case LLDPMediaTypeAssetID:
+			info.AssetID = string(o.Info)
+		}
+	}
+	return
+}
+
+// DecodeCisco2 extracts the Cisco four-wire Power-via-MDI flags advertised
+// in this LLDPDU.  Only the PowerViaMDI subtype is recognized; TLVs with
+// other OUIs or subtypes are ignored.
+func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUICisco2 || LLDPCisco2Subtype(o.SubType) != LLDPCisco2PowerViaMDI {
+			continue
+		}
+		if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+			return
+		}
+		// All four capabilities are packed into the first flag byte.
+		flags := o.Info[0]
+		info.PSEFourWirePoESupported = flags&LLDPCiscoPSESupport != 0
+		info.PDSparePairArchitectureShared = flags&LLDPCiscoArchShared != 0
+		info.PDRequestSparePairPoEOn = flags&LLDPCiscoPDSparePair != 0
+		info.PSESparePairPoEOn = flags&LLDPCiscoPSESparePair != 0
+	}
+	return
+}
+
+// DecodeProfinet decodes the Profinet organizationally-specific TLVs in
+// this LLDPDU into an LLDPInfoProfinet.  TLVs with other OUIs and unknown
+// subtypes are ignored; the first failing length check aborts decoding.
+func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIProfinet {
+			continue
+		}
+		switch LLDPProfinetSubtype(o.SubType) {
+		case LLDPProfinetPNIODelay:
+			// Five consecutive 32-bit delay values.
+			if err = checkLLDPOrgSpecificLen(o, 20); err != nil {
+				return
+			}
+			info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4])
+			info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8])
+			info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12])
+			info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16])
+			info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20])
+		case LLDPProfinetPNIOPortStatus:
+			// Two 16-bit RT class status words.
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2])
+			info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4])
+		case LLDPProfinetPNIOMRPPortStatus:
+			// 16-byte MRP domain UUID plus a 16-bit status word.
+			if err = checkLLDPOrgSpecificLen(o, 18); err != nil {
+				return
+			}
+			info.PNIOMRPPortStatus.UUID = o.Info[0:16]
+			info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18])
+		case LLDPProfinetPNIOChassisMAC:
+			if err = checkLLDPOrgSpecificLen(o, 6); err != nil {
+				return
+			}
+			info.ChassisMAC = o.Info[0:6]
+		case LLDPProfinetPNIOPTCPStatus:
+			// Master MAC (6) + two UUIDs (16 each) + four 32-bit words,
+			// each packing a validity flag in the top bit and a 31-bit
+			// period value in the rest.
+			if err = checkLLDPOrgSpecificLen(o, 54); err != nil {
+				return
+			}
+			info.PNIOPTCPStatus.MasterAddress = o.Info[0:6]
+			info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22]
+			info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38]
+			b := binary.BigEndian.Uint32(o.Info[38:42])
+			info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[42:46])
+			info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[46:50])
+			info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[50:54])
+			info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff
+		}
+	}
+	return
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo.
+func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscoveryInfo
+}
+
+// getCapabilities expands the 16-bit LLDP system-capabilities bitmap into
+// its per-capability boolean representation.
+func getCapabilities(v uint16) (c LLDPCapabilities) {
+	has := func(mask uint16) bool { return v&mask != 0 }
+	c.Other = has(LLDPCapsOther)
+	c.Repeater = has(LLDPCapsRepeater)
+	c.Bridge = has(LLDPCapsBridge)
+	c.WLANAP = has(LLDPCapsWLANAP)
+	c.Router = has(LLDPCapsRouter)
+	c.Phone = has(LLDPCapsPhone)
+	c.DocSis = has(LLDPCapsDocSis)
+	c.StationOnly = has(LLDPCapsStationOnly)
+	c.CVLAN = has(LLDPCapsCVLAN)
+	c.SVLAN = has(LLDPCapsSVLAN)
+	c.TMPR = has(LLDPCapsTmpr)
+	return
+}
+
+// getEVBCapabilities expands the 16-bit EVB capabilities bitmap into its
+// per-capability boolean representation.  The original assigned
+// StandardBridging twice; the duplicate line is removed.
+func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) {
+	c.StandardBridging = (v & LLDPEVBCapsSTD) > 0
+	c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0
+	c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0
+	c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0
+	c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0
+	return
+}
+
+// String returns a human-readable name for the TLV type.
+func (t LLDPTLVType) String() (s string) {
+	switch t {
+	case LLDPTLVEnd:
+		return "TLV End"
+	case LLDPTLVChassisID:
+		return "Chassis ID"
+	case LLDPTLVPortID:
+		return "Port ID"
+	case LLDPTLVTTL:
+		return "TTL"
+	case LLDPTLVPortDescription:
+		return "Port Description"
+	case LLDPTLVSysName:
+		return "System Name"
+	case LLDPTLVSysDescription:
+		return "System Description"
+	case LLDPTLVSysCapabilities:
+		return "System Capabilities"
+	case LLDPTLVMgmtAddress:
+		return "Management Address"
+	case LLDPTLVOrgSpecific:
+		return "Organisation Specific"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the chassis ID subtype.
+func (t LLDPChassisIDSubType) String() (s string) {
+	switch t {
+	case LLDPChassisIDSubTypeReserved:
+		return "Reserved"
+	case LLDPChassisIDSubTypeChassisComp:
+		return "Chassis Component"
+	case LLDPChassisIDSubtypeIfaceAlias:
+		return "Interface Alias"
+	case LLDPChassisIDSubTypePortComp:
+		return "Port Component"
+	case LLDPChassisIDSubTypeMACAddr:
+		return "MAC Address"
+	case LLDPChassisIDSubTypeNetworkAddr:
+		return "Network Address"
+	case LLDPChassisIDSubtypeIfaceName:
+		return "Interface Name"
+	case LLDPChassisIDSubTypeLocal:
+		return "Local"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the port ID subtype.
+func (t LLDPPortIDSubType) String() (s string) {
+	switch t {
+	case LLDPPortIDSubtypeReserved:
+		return "Reserved"
+	case LLDPPortIDSubtypeIfaceAlias:
+		return "Interface Alias"
+	case LLDPPortIDSubtypePortComp:
+		return "Port Component"
+	case LLDPPortIDSubtypeMACAddr:
+		return "MAC Address"
+	case LLDPPortIDSubtypeNetworkAddr:
+		return "Network Address"
+	case LLDPPortIDSubtypeIfaceName:
+		return "Interface Name"
+	case LLDPPortIDSubtypeAgentCircuitID:
+		return "Agent Circuit ID"
+	case LLDPPortIDSubtypeLocal:
+		return "Local"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the IANA address family.
+func (t IANAAddressFamily) String() (s string) {
+	switch t {
+	case IANAAddressFamilyReserved:
+		return "Reserved"
+	case IANAAddressFamilyIPV4:
+		return "IPv4"
+	case IANAAddressFamilyIPV6:
+		return "IPv6"
+	case IANAAddressFamilyNSAP:
+		return "NSAP"
+	case IANAAddressFamilyHDLC:
+		return "HDLC"
+	case IANAAddressFamilyBBN1822:
+		return "BBN 1822"
+	case IANAAddressFamily802:
+		return "802 media plus Ethernet 'canonical format'"
+	case IANAAddressFamilyE163:
+		return "E.163"
+	case IANAAddressFamilyE164:
+		return "E.164 (SMDS, Frame Relay, ATM)"
+	case IANAAddressFamilyF69:
+		return "F.69 (Telex)"
+	case IANAAddressFamilyX121:
+		return "X.121, X.25, Frame Relay"
+	case IANAAddressFamilyIPX:
+		return "IPX"
+	case IANAAddressFamilyAtalk:
+		return "Appletalk"
+	case IANAAddressFamilyDecnet:
+		return "Decnet IV"
+	case IANAAddressFamilyBanyan:
+		return "Banyan Vines"
+	case IANAAddressFamilyE164NSAP:
+		return "E.164 with NSAP format subaddress"
+	case IANAAddressFamilyDNS:
+		return "DNS"
+	case IANAAddressFamilyDistname:
+		return "Distinguished Name"
+	case IANAAddressFamilyASNumber:
+		return "AS Number"
+	case IANAAddressFamilyXTPIPV4:
+		return "XTP over IP version 4"
+	case IANAAddressFamilyXTPIPV6:
+		return "XTP over IP version 6"
+	case IANAAddressFamilyXTP:
+		return "XTP native mode XTP"
+	case IANAAddressFamilyFcWWPN:
+		return "Fibre Channel World-Wide Port Name"
+	case IANAAddressFamilyFcWWNN:
+		return "Fibre Channel World-Wide Node Name"
+	case IANAAddressFamilyGWID:
+		return "GWID"
+	case IANAAddressFamilyL2VPN:
+		return "AFI for Layer 2 VPN"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the interface numbering subtype.
+func (t LLDPInterfaceSubtype) String() (s string) {
+	switch t {
+	case LLDPInterfaceSubtypeUnknown:
+		return "Unknown"
+	case LLDPInterfaceSubtypeifIndex:
+		return "IfIndex"
+	case LLDPInterfaceSubtypeSysPort:
+		return "System Port Number"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the 802.3at power type.
+func (t LLDPPowerType) String() (s string) {
+	switch t {
+	case 0:
+		return "Type 2 PSE Device"
+	case 1:
+		return "Type 2 PD Device"
+	case 2:
+		return "Type 1 PSE Device"
+	case 3:
+		return "Type 1 PD Device"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the power source.  PSE-side
+// values are offset by 128 (see the decoders, which add 128 for Stringify
+// purposes) to disambiguate them from PD-side values.
+func (t LLDPPowerSource) String() (s string) {
+	switch t {
+	// PD Device
+	case 0:
+		return "Unknown"
+	case 1:
+		return "PSE"
+	case 2:
+		return "Local"
+	case 3:
+		return "PSE and Local"
+	// PSE Device (Actual value + 128)
+	case 128:
+		return "Unknown"
+	case 129:
+		return "Primary Power Source"
+	case 130:
+		return "Backup Power Source"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the power priority.
+func (t LLDPPowerPriority) String() (s string) {
+	switch t {
+	case 0:
+		return "Unknown"
+	case 1:
+		return "Critical"
+	case 2:
+		return "High"
+	case 3:
+		return "Low"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the LLDP-MED TLV subtype.
+// (Strings are preserved verbatim, including the original trailing space.)
+func (t LLDPMediaSubtype) String() (s string) {
+	switch t {
+	case LLDPMediaTypeCapabilities:
+		return "Media Capabilities "
+	case LLDPMediaTypeNetwork:
+		return "Network Policy"
+	case LLDPMediaTypeLocation:
+		return "Location Identification"
+	case LLDPMediaTypePower:
+		return "Extended Power-via-MDI"
+	case LLDPMediaTypeHardware:
+		return "Hardware Revision"
+	case LLDPMediaTypeFirmware:
+		return "Firmware Revision"
+	case LLDPMediaTypeSoftware:
+		return "Software Revision"
+	case LLDPMediaTypeSerial:
+		return "Serial Number"
+	case LLDPMediaTypeManufacturer:
+		return "Manufacturer"
+	case LLDPMediaTypeModel:
+		return "Model"
+	case LLDPMediaTypeAssetID:
+		return "Asset ID"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the LLDP-MED device class.
+// (Strings are preserved verbatim, including the original trailing space.)
+func (t LLDPMediaClass) String() (s string) {
+	switch t {
+	case LLDPMediaClassUndefined:
+		return "Undefined"
+	case LLDPMediaClassEndpointI:
+		return "Endpoint Class I"
+	case LLDPMediaClassEndpointII:
+		return "Endpoint Class II"
+	case LLDPMediaClassEndpointIII:
+		return "Endpoint Class III"
+	case LLDPMediaClassNetwork:
+		return "Network connectivity "
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the network-policy application type.
+func (t LLDPApplicationType) String() (s string) {
+	switch t {
+	case LLDPAppTypeReserved:
+		return "Reserved"
+	case LLDPAppTypeVoice:
+		return "Voice"
+	case LLDPappTypeVoiceSignaling:
+		return "Voice Signaling"
+	case LLDPappTypeGuestVoice:
+		return "Guest Voice"
+	case LLDPappTypeGuestVoiceSignaling:
+		return "Guest Voice Signaling"
+	case LLDPappTypeSoftphoneVoice:
+		return "Softphone Voice"
+	case LLDPappTypeVideoConferencing:
+		return "Video Conferencing"
+	case LLDPappTypeStreamingVideo:
+		return "Streaming Video"
+	case LLDPappTypeVideoSignaling:
+		return "Video Signaling"
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable name for the location data format.
+func (t LLDPLocationFormat) String() (s string) {
+	switch t {
+	case LLDPLocationFormatInvalid:
+		s = "Invalid"
+	case LLDPLocationFormatCoordinate:
+		s = "Coordinate-based LCI"
+	case LLDPLocationFormatAddress:
+		// Fixed typo: civic address is also an LCI ("LCO" in the original).
+		s = "Address-based LCI"
+	case LLDPLocationFormatECS:
+		s = "ECS ELIN"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+// String returns a human-readable name for the civic-address element type.
+func (t LLDPLocationAddressType) String() (s string) {
+	switch t {
+	case LLDPLocationAddressTypeLanguage:
+		return "Language"
+	case LLDPLocationAddressTypeNational:
+		return "National subdivisions (province, state, etc)"
+	case LLDPLocationAddressTypeCounty:
+		return "County, parish, district"
+	case LLDPLocationAddressTypeCity:
+		return "City, township"
+	case LLDPLocationAddressTypeCityDivision:
+		return "City division, borough, ward"
+	case LLDPLocationAddressTypeNeighborhood:
+		return "Neighborhood, block"
+	case LLDPLocationAddressTypeStreet:
+		return "Street"
+	case LLDPLocationAddressTypeLeadingStreet:
+		return "Leading street direction"
+	case LLDPLocationAddressTypeTrailingStreet:
+		return "Trailing street suffix"
+	case LLDPLocationAddressTypeStreetSuffix:
+		return "Street suffix"
+	case LLDPLocationAddressTypeHouseNum:
+		return "House number"
+	case LLDPLocationAddressTypeHouseSuffix:
+		return "House number suffix"
+	case LLDPLocationAddressTypeLandmark:
+		return "Landmark or vanity address"
+	case LLDPLocationAddressTypeAdditional:
+		return "Additional location information"
+	case LLDPLocationAddressTypeName:
+		return "Name"
+	case LLDPLocationAddressTypePostal:
+		return "Postal/ZIP code"
+	case LLDPLocationAddressTypeBuilding:
+		return "Building"
+	case LLDPLocationAddressTypeUnit:
+		return "Unit"
+	case LLDPLocationAddressTypeFloor:
+		return "Floor"
+	case LLDPLocationAddressTypeRoom:
+		return "Room number"
+	case LLDPLocationAddressTypePlace:
+		return "Place type"
+	case LLDPLocationAddressTypeScript:
+		return "Script"
+	}
+	return "Unknown"
+}
+
+// checkLLDPTLVLen returns an error when TLV v carries fewer than l bytes.
+// Fixed the format string: "mimimum" typo and a missing closing
+// parenthesis, making it consistent with checkLLDPOrgSpecificLen.
+func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) {
+	if len(v.Value) < l {
+		err = fmt.Errorf("Invalid TLV %v length %d (wanted minimum %v)", v.Type, len(v.Value), l)
+	}
+	return
+}
+
+// checkLLDPOrgSpecificLen returns an error when org-specific TLV o carries
+// fewer than l info bytes.
+func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) {
+	if len(o.Info) >= l {
+		return nil
+	}
+	return fmt.Errorf("Invalid Org Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l)
+}
diff --git a/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/google/gopacket/layers/loopback.go
new file mode 100644
index 0000000..839f760
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/loopback.go
@@ -0,0 +1,80 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// Loopback contains the header for loopback encapsulation. This header is
+// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL
+// and DLT_LOOP, respectively).
+type Loopback struct {
+	BaseLayer
+	// Family is the protocol family carried in the 4-byte loopback header;
+	// its byte order is auto-detected in DecodeFromBytes.
+	Family ProtocolFamily
+}
+
+// LayerType returns LayerTypeLoopback.
+func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		return errors.New("Loopback packet too small")
+	}
+
+	// The 4-byte family field may have been written in either byte order
+	// by the capturing host.  Valid families are < 256, so a big-endian
+	// encoding leaves the two leading bytes zero; anything else is read
+	// little-endian.
+	hdr := data[:4]
+	var prot uint32
+	if hdr[0] == 0 && hdr[1] == 0 {
+		prot = binary.BigEndian.Uint32(hdr)
+	} else {
+		prot = binary.LittleEndian.Uint32(hdr)
+	}
+	if prot > 0xFF {
+		return fmt.Errorf("Invalid loopback protocol %q", data[:4])
+	}
+
+	l.Family = ProtocolFamily(prot)
+	l.BaseLayer = BaseLayer{hdr, data[4:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *Loopback) CanDecode() gopacket.LayerClass {
+	return LayerTypeLoopback
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *Loopback) NextLayerType() gopacket.LayerType {
+	return l.Family.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// NOTE(review): the family is always written little-endian here, while
+// DecodeFromBytes accepts either byte order — confirm this matches the
+// intended capture platform.
+func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	binary.LittleEndian.PutUint32(bytes, uint32(l.Family))
+	return nil
+}
+
+// decodeLoopback decodes a loopback-encapsulated packet, adds the Loopback
+// layer, and hands the payload to the decoder selected by the family field.
+func decodeLoopback(data []byte, p gopacket.PacketBuilder) error {
+	l := Loopback{}
+	if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+		return err
+	}
+	p.AddLayer(&l)
+	return p.NextDecoder(l.Family)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/google/gopacket/layers/mldv1.go
new file mode 100644
index 0000000..e1bb1dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv1.go
@@ -0,0 +1,182 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+// MLDv1Message represents the common structure of all MLDv1 messages
+// (query, report and done share the same 20-byte wire layout).
+type MLDv1Message struct {
+	BaseLayer
+	// 3.4. Maximum Response Delay
+	MaximumResponseDelay time.Duration
+	// 3.6. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for Multicast Listener Query Message V1")
+	}
+
+	// The first two bytes carry the delay in milliseconds.
+	m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond
+	// data[2:4] is reserved and not used in mldv1
+	// NOTE(review): the address aliases the input buffer rather than
+	// copying it; callers must not mutate data afterwards.
+	m.MulticastAddress = data[4:20]
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv1Message) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	out, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	if m.MaximumResponseDelay < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+	delayMS := m.MaximumResponseDelay / time.Millisecond
+	if delayMS > math.MaxUint16 {
+		return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", delayMS)
+	}
+	binary.BigEndian.PutUint16(out[0:2], uint16(delayMS))
+
+	// Reserved field: write zeros explicitly, the buffer may be dirty.
+	out[2], out[3] = 0, 0
+
+	addr := m.MulticastAddress.To16()
+	if addr == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(out[4:20], addr)
+
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv1Message) String() string {
+	delayMS := m.MaximumResponseDelay / time.Millisecond
+	return fmt.Sprintf(
+		"Maximum Response Delay: %dms, Multicast Address: %s",
+		delayMS, m.MulticastAddress)
+}
+
+// MLDv1MulticastListenerQueryMessage are sent by the router to determine
+// whether there are multicast listeners on the link.
+// https://tools.ietf.org/html/rfc2710 Page 5
+type MLDv1MulticastListenerQueryMessage struct {
+	// Embeds the shared MLDv1 fields (MaximumResponseDelay, MulticastAddress).
+	MLDv1Message
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.  Anything past
+// the fixed 20-byte MLDv1 header is exposed as the payload.
+func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if err := m.MLDv1Message.DecodeFromBytes(data, df); err != nil {
+		return err
+	}
+	if len(data) > 20 {
+		m.Payload = data[20:]
+	}
+	return nil
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerQuery.
+func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// IsGeneralQuery is true when this is a general query.
+// In a Query message, the Multicast Address field is set to zero when
+// sending a General Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool {
+	return net.IPv6zero.Equal(m.MulticastAddress)
+}
+
+// IsSpecificQuery is true when this is not a general query.
+// In a Query message, the Multicast Address field is set to a specific
+// IPv6 multicast address when sending a Multicast-Address-Specific Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool {
+	return !m.IsGeneralQuery()
+}
+
+// MLDv1MulticastListenerReportMessage is sent by a client listening on
+// a specific multicast address to indicate that it is (still) listening
+// on the specific multicast address.
+// https://tools.ietf.org/html/rfc2710 Page 6
+type MLDv1MulticastListenerReportMessage struct {
+	// Embeds the shared MLDv1 fields and 20-byte wire layout.
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerReport.
+func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases
+// to listen to a multicast address on an interface.
+// https://tools.ietf.org/html/rfc2710 Page 7
+type MLDv1MulticastListenerDoneMessage struct {
+	// Embeds the shared MLDv1 fields and 20-byte wire layout.
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerDone.
+func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+// decodeMLDv1MulticastListenerReport decodes an MLDv1 report message.
+func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&MLDv1MulticastListenerReportMessage{}, data, p)
+}
+
+// decodeMLDv1MulticastListenerQuery decodes an MLDv1 query message.
+func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&MLDv1MulticastListenerQueryMessage{}, data, p)
+}
+
+// decodeMLDv1MulticastListenerDone decodes an MLDv1 done message.
+func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&MLDv1MulticastListenerDoneMessage{}, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/google/gopacket/layers/mldv2.go
new file mode 100644
index 0000000..248cf74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv2.go
@@ -0,0 +1,619 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+const (
+	// mldv2STrue is the value of the S Flag when set (bit 3 of the
+	// Resv/S/QRV byte).
+	mldv2STrue uint8 = 0x8
+
+	// mldv2SMask extracts the S Flag from the Resv/S/QRV byte:
+	// mldv2STrue & mldv2SMask == mldv2STrue // true
+	// 0x1 & mldv2SMask == mldv2STrue // false
+	// 0x0 & mldv2SMask == mldv2STrue // false
+	mldv2SMask uint8 = 0x8
+
+	// mldv2QRVMask extracts the 3-bit QRV field (bits 0-2).
+	mldv2QRVMask uint8 = 0x7
+)
+
+// MLDv2MulticastListenerQueryMessage are sent by multicast routers to query the
+// multicast listening state of neighboring interfaces.
+// https://tools.ietf.org/html/rfc3810#section-5.1
+//
+// Some information, like Maximum Response Code and Multicast Address are in the
+// previous layer LayerTypeMLDv1MulticastListenerQuery
+type MLDv2MulticastListenerQueryMessage struct {
+	BaseLayer
+	// 5.1.3. Maximum Response Delay Code
+	MaximumResponseCode uint16
+	// 5.1.5. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+	// 5.1.7. S Flag (Suppress Router-Side Processing)
+	SuppressRoutersideProcessing bool
+	// 5.1.8. QRV (Querier's Robustness Variable)
+	QueriersRobustnessVariable uint8
+	// 5.1.9. QQIC (Querier's Query Interval Code)
+	QueriersQueryIntervalCode uint8
+	// 5.1.10. Number of Sources (N)
+	NumberOfSources uint16
+	// 5.1.11 Source Address [i]
+	SourceAddresses []net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+// Expected layout: max response code (2), reserved (2), multicast
+// address (16), Resv/S/QRV (1), QQIC (1), number of sources (2),
+// then NumberOfSources 16-byte source addresses.
+func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// 24 bytes covers the fixed part up to and including NumberOfSources.
+	if len(data) < 24 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2")
+	}
+
+	m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2])
+	// ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4
+	// NOTE(review): the address slices alias the input buffer, no copy is made.
+	m.MulticastAddress = data[4:20]
+	m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue
+	m.QueriersRobustnessVariable = data[20] & mldv2QRVMask
+	m.QueriersQueryIntervalCode = data[21]
+
+	m.NumberOfSources = binary.BigEndian.Uint16(data[22:24])
+
+	// Each source address is 16 bytes; validate bounds per entry so the
+	// error reports the first offset that overruns the data.
+	var end int
+	for i := uint16(0); i < m.NumberOfSources; i++ {
+		begin := 24 + (int(i) * 16)
+		end = begin + 16
+
+		if end > len(data) {
+			df.SetTruncated()
+			return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+// A query message carries no further payload.
+func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+// The source addresses are prepended first (which also fixes
+// NumberOfSources when opts.FixLengths is set), then the fixed
+// 24-byte header is prepended in front of them.
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(24)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode)
+	copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	// Pack QRV (bits 0-2) and the S flag (bit 3) into byte 20.
+	byte20 := m.QueriersRobustnessVariable & mldv2QRVMask
+	if m.SuppressRoutersideProcessing {
+		byte20 |= mldv2STrue
+	} else {
+		byte20 &= ^mldv2STrue // the complement of mldv2STrue
+	}
+	byte20 &= 0x0F // set reserved bits to zero
+	buf[20] = byte20
+
+	binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources)
+	buf[21] = m.QueriersQueryIntervalCode
+
+	return nil
+}
+
+// serializeSourceAddressesTo prepends every source address to the buffer in
+// reverse order so that they end up serialized in their original order.
+// When opts.FixLengths is set, NumberOfSources is synced to the slice length.
+func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	count := len(m.SourceAddresses)
+	if count > math.MaxUint16 {
+		return fmt.Errorf(
+			"there are more than %d source addresses, but 65535 is the maximum number of supported addresses",
+			count)
+	}
+
+	if opts.FixLengths {
+		m.NumberOfSources = uint16(count)
+	}
+
+	for i := count - 1; i >= 0; i-- {
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		addr := m.SourceAddresses[i].To16()
+		if addr == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf[0:16], addr)
+	}
+
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerQueryMessage) String() string {
+	return fmt.Sprintf(
+		"Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Address: %d (actual: %d), Source Addresses: %s",
+		m.MaximumResponseCode,
+		m.MaximumResponseDelay(),
+		m.MulticastAddress,
+		m.SuppressRoutersideProcessing,
+		m.QueriersRobustnessVariable,
+		m.QueriersQueryIntervalCode,
+		m.QQI()/time.Second,
+		m.NumberOfSources,
+		len(m.SourceAddresses),
+		m.SourceAddresses)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerQuery.
+func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+// Only MLDv2 multicast listener queries are accepted.
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// QQI calculates the Querier's Query Interval based on the QQIC
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration {
+	data := m.QueriersQueryIntervalCode
+	// Codes below 128 encode the interval in seconds directly.
+	if data < 128 {
+		return time.Second * time.Duration(data)
+	}
+
+	// Otherwise the code is 1|exp(3 bits)|mant(4 bits) and
+	// QQI = (mant | 0x10) << (exp + 3) seconds. The shift is done on a
+	// time.Duration so it cannot overflow a narrow integer type.
+	exp := uint16(data) & 0x70 >> 4
+	mant := uint16(data) & 0x0F
+	return time.Second * (time.Duration(mant|0x10) << (exp + 3))
+}
+
+// SetQQI calculates and updates the Querier's Query Interval Code (QQIC)
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+// It returns an error (and stores a sentinel code) for negative or
+// too-large durations; sub-second precision is truncated.
+func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error {
+	if d < 0 {
+		m.QueriersQueryIntervalCode = 0
+		return errors.New("QQI duration is negative")
+	}
+
+	if d == 0 {
+		m.QueriersQueryIntervalCode = 0
+		return nil
+	}
+
+	dms := d / time.Second
+	// Durations below 128s are encoded directly.
+	if dms < 128 {
+		m.QueriersQueryIntervalCode = uint8(dms)
+		return nil
+	}
+
+	if dms > 31744 { // mant=0xF, exp=0x7
+		m.QueriersQueryIntervalCode = 0xFF
+		return fmt.Errorf("QQI duration %ds is too big, maximum allowed is 31744s", dms)
+	}
+
+	// Find the exponent: because of the implicit 0x10 bit shifted by
+	// exp+3, the top set bit of the value sits at position exp+7.
+	value := uint16(dms) // ok, because 31744 < math.MaxUint16
+	exp := uint8(7)
+	for mask := uint16(0x4000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint8(0x000F & (value >> (exp + 3)))
+	// The code is flag(0x80) | exp<<4 | mant.
+	m.QueriersQueryIntervalCode = 0x80 | exp<<4 | mant
+
+	return nil
+}
+
+// MaximumResponseDelay returns the Maximum Response Delay based on the
+// Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration {
+	// Codes below 0x8000 encode the delay in milliseconds directly.
+	if m.MaximumResponseCode < 0x8000 {
+		return time.Millisecond * time.Duration(m.MaximumResponseCode)
+	}
+
+	// Otherwise the code is 1|exp(3 bits)|mant(12 bits) and the delay is
+	// (mant | 0x1000) << (exp + 3) milliseconds. The shift is done on a
+	// time.Duration because the result can exceed 16 bits.
+	exp := m.MaximumResponseCode & 0x7000 >> 12
+	mant := m.MaximumResponseCode & 0x0FFF
+
+	return time.Millisecond * (time.Duration(mant|0x1000) << (exp + 3))
+}
+
+// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+// Sub-millisecond precision is truncated; delays above the representable
+// maximum yield an error and leave the code unchanged.
+func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error {
+	if d == 0 {
+		m.MaximumResponseCode = 0
+		return nil
+	}
+
+	if d < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+
+	dms := d / time.Millisecond
+
+	// Delays below 32768ms are encoded directly.
+	if dms < 32768 {
+		m.MaximumResponseCode = uint16(dms)
+		return nil
+	}
+
+	if dms > 4193280 { // mant=0xFFF, exp=0x7
+		return fmt.Errorf("maximum response delay %dms is bigger than the maximum of 4193280ms", dms)
+	}
+
+	// Find the exponent: because of the implicit 0x1000 bit shifted by
+	// exp+3, the top set bit of the value sits at position exp+15, so
+	// exp=7 corresponds to bit 22 (0x400000).
+	value := uint32(dms) // ok, because 4193280 < math.MaxUint32
+	exp := uint8(7)
+	for mask := uint32(0x400000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint16(0x00000FFF & (value >> (exp + 3)))
+	// The code is flag(0x8000) | exp<<12 | mant.
+	m.MaximumResponseCode = 0x8000 | uint16(exp)<<12 | mant
+	return nil
+}
+
+// MLDv2MulticastListenerReportMessage is sent by an IP node to report the
+// current multicast listening state, or changes therein.
+// https://tools.ietf.org/html/rfc3810#section-5.2
+type MLDv2MulticastListenerReportMessage struct {
+	BaseLayer
+	// 5.2.3. Nr of Mcast Address Records
+	NumberOfMulticastAddressRecords uint16
+	// 5.2.4. Multicast Address Record [i]
+	MulticastAddressRecords []MLDv2MulticastAddressRecord
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+// Layout: reserved (2), number of records (2), then that many
+// variable-length multicast address records decoded back to back.
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2")
+	}
+
+	// ignore data[0:2] as per RFC
+	// https://tools.ietf.org/html/rfc3810#section-5.2.1
+	m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4])
+
+	// Records have variable size; each decode reports how many bytes it
+	// consumed so the next record starts right after it.
+	begin := 4
+	for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ {
+		mar := MLDv2MulticastAddressRecord{}
+		read, err := mar.decode(data[begin:], df)
+		if err != nil {
+			return err
+		}
+
+		m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar)
+
+		begin += read
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+// Records are prepended in reverse so they serialize in original order;
+// the fixed 4-byte header is prepended last.
+func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	lastItemIdx := len(m.MulticastAddressRecords) - 1
+	for k := range m.MulticastAddressRecords {
+		i := lastItemIdx - k // reverse order
+
+		err := m.MulticastAddressRecords[i].serializeTo(b, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	if opts.FixLengths {
+		numberOfMAR := len(m.MulticastAddressRecords)
+		if numberOfMAR > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d multicast address records added, but the maximum is 65535",
+				numberOfMAR)
+		}
+
+		m.NumberOfMulticastAddressRecords = uint16(numberOfMAR)
+	}
+
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	// Reserved bytes, then the record count.
+	copy(buf[0:2], []byte{0x0, 0x0})
+	binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords)
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerReportMessage) String() string {
+	return fmt.Sprintf(
+		"Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v",
+		m.NumberOfMulticastAddressRecords,
+		len(m.MulticastAddressRecords),
+		m.MulticastAddressRecords)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerReport.
+func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// MLDv2MulticastAddressRecordType holds the type of a
+// Multicast Address Record, according to
+// https://tools.ietf.org/html/rfc3810#section-5.2.5 and
+// https://tools.ietf.org/html/rfc3810#section-5.2.12
+type MLDv2MulticastAddressRecordType uint8
+
+// Values 1-6 follow the table in RFC 3810 section 5.2.12.
+const (
+	// MLDv2MulticastAddressRecordTypeModeIsIncluded stands for
+	// MODE_IS_INCLUDE - indicates that the interface has a filter
+	// mode of INCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1
+	// MLDv2MulticastAddressRecordTypeModeIsExcluded stands for
+	// MODE_IS_EXCLUDE - indicates that the interface has a filter
+	// mode of EXCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2
+	// MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for
+	// CHANGE_TO_INCLUDE_MODE - indicates that the interface has
+	// changed to INCLUDE filter mode for the specified multicast
+	// address.
+	MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3
+	// MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for
+	// CHANGE_TO_EXCLUDE_MODE - indicates that the interface has
+	// changed to EXCLUDE filter mode for the specified multicast
+	// address
+	MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4
+	// MLDv2MulticastAddressRecordTypeAllowNewSources stands for
+	// ALLOW_NEW_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the additional sources that the node wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5
+	// MLDv2MulticastAddressRecordTypeBlockOldSources stands for
+	// BLOCK_OLD_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the sources that the node no longer wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6
+)
+
+// String returns the human readable record type name.
+// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12
+func (m MLDv2MulticastAddressRecordType) String() string {
+	names := [...]string{
+		MLDv2MulticastAddressRecordTypeModeIsIncluded:      "MODE_IS_INCLUDE",
+		MLDv2MulticastAddressRecordTypeModeIsExcluded:      "MODE_IS_EXCLUDE",
+		MLDv2MulticastAddressRecordTypeChangeToIncludeMode: "CHANGE_TO_INCLUDE_MODE",
+		MLDv2MulticastAddressRecordTypeChangeToExcludeMode: "CHANGE_TO_EXCLUDE_MODE",
+		MLDv2MulticastAddressRecordTypeAllowNewSources:     "ALLOW_NEW_SOURCES",
+		MLDv2MulticastAddressRecordTypeBlockOldSources:     "BLOCK_OLD_SOURCES",
+	}
+	if int(m) < len(names) && names[m] != "" {
+		return names[m]
+	}
+	return fmt.Sprintf("UNKNOWN(%d)", m)
+}
+
+// MLDv2MulticastAddressRecord contains information on the sender listening to a
+// single multicast address on the interface the report is sent.
+// https://tools.ietf.org/html/rfc3810#section-5.2.4
+type MLDv2MulticastAddressRecord struct {
+	// 5.2.5. Record Type
+	RecordType MLDv2MulticastAddressRecordType
+	// 5.2.6. Auxiliary Data Length (number of 32-bit words)
+	AuxDataLen uint8
+	// 5.2.7. Number Of Sources (N)
+	N uint16
+	// 5.2.8. Multicast Address
+	MulticastAddress net.IP
+	// 5.2.9 Source Address [i]
+	SourceAddresses []net.IP
+	// 5.2.10 Auxiliary Data
+	AuxiliaryData []byte
+}
+
+// decode parses one multicast address record from data. It returns the
+// number of bytes the record occupies and a non-nil error if the record
+// is truncated.
+func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) {
+	// The fixed part of a record is 4 header bytes plus a 16-byte
+	// multicast address; reading data[4:20] below needs all 20 bytes.
+	if len(data) < 20 {
+		df.SetTruncated()
+		return 0, errors.New(
+			"Multicast Listener Report Message V2 layer less than 20 bytes for Multicast Address Record")
+	}
+
+	m.RecordType = MLDv2MulticastAddressRecordType(data[0])
+	m.AuxDataLen = data[1]
+	m.N = binary.BigEndian.Uint16(data[2:4])
+	m.MulticastAddress = data[4:20]
+
+	for i := uint16(0); i < m.N; i++ {
+		begin := 20 + (int(i) * 16)
+		end := begin + 16
+
+		if len(data) < end {
+			df.SetTruncated()
+			return begin, fmt.Errorf(
+				"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	expectedLengthWithoutAuxData := 20 + (int(m.N) * 16)
+	// AuxDataLen counts 32-bit words, hence *4.
+	expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithoutAuxData
+	if len(data) < expectedTotalLength {
+		df.SetTruncated()
+		return expectedLengthWithoutAuxData, fmt.Errorf(
+			"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record",
+			expectedTotalLength)
+	}
+
+	m.AuxiliaryData = data[expectedLengthWithoutAuxData:expectedTotalLength]
+
+	return expectedTotalLength, nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastAddressRecord) String() string {
+	return fmt.Sprintf(
+		"RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x",
+		m.RecordType,
+		m.RecordType.String(),
+		m.AuxDataLen,
+		m.N,
+		m.MulticastAddress.To16(),
+		m.SourceAddresses,
+		m.AuxiliaryData)
+}
+
+// serializeTo prepends one multicast address record to the buffer.
+// Prepend order is the reverse of the wire order: auxiliary data first,
+// then the source addresses, then the fixed 20-byte record header.
+func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeAuxiliaryDataTo(b, opts); err != nil {
+		return err
+	}
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	hdr, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	hdr[0] = uint8(m.RecordType)
+	hdr[1] = m.AuxDataLen
+	binary.BigEndian.PutUint16(hdr[2:4], m.N)
+
+	addr := m.MulticastAddress.To16()
+	if addr == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(hdr[4:20], addr)
+
+	return nil
+}
+
+// serializeAuxiliaryDataTo prepends the auxiliary data of a multicast
+// address record, zero-padded to a whole number of 32-bit words. When
+// opts.FixLengths is set, AuxDataLen is synced to the padded length.
+func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// A remainder of r bytes needs 4-r zero bytes to reach the next
+	// 32-bit word boundary.
+	if remainder := len(m.AuxiliaryData) % 4; remainder != 0 {
+		zeroWord := []byte{0x0, 0x0, 0x0, 0x0}
+		m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:4-remainder]...)
+	}
+
+	if opts.FixLengths {
+		auxDataLen := len(m.AuxiliaryData) / 4
+
+		if auxDataLen > math.MaxUint8 {
+			return fmt.Errorf("auxilary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen)
+		}
+
+		m.AuxDataLen = uint8(auxDataLen)
+	}
+
+	buf, err := b.PrependBytes(len(m.AuxiliaryData))
+	if err != nil {
+		return err
+	}
+
+	copy(buf, m.AuxiliaryData)
+	return nil
+}
+
+// serializeSourceAddressesTo prepends the record's source addresses in
+// reverse order so they serialize in their original order. When
+// opts.FixLengths is set, N is synced to the slice length first.
+func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		count := len(m.SourceAddresses)
+
+		if count > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d source addresses added, but the maximum is 65535",
+				count)
+		}
+
+		m.N = uint16(count)
+	}
+
+	for i := len(m.SourceAddresses) - 1; i >= 0; i-- {
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		addr := m.SourceAddresses[i].To16()
+		if addr == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf, addr)
+	}
+
+	return nil
+}
+
+// decodeMLDv2MulticastListenerReport decodes data as an MLDv2 multicast
+// listener report via the shared DecodingLayer helper.
+func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&MLDv2MulticastListenerReportMessage{}, data, p)
+}
+
+// decodeMLDv2MulticastListenerQuery decodes data as an MLDv2 multicast
+// listener query via the shared DecodingLayer helper.
+func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&MLDv2MulticastListenerQueryMessage{}, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/google/gopacket/layers/modbustcp.go
new file mode 100644
index 0000000..bafbd74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/modbustcp.go
@@ -0,0 +1,150 @@
+// Copyright 2018, The GoPacket Authors, All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// ModbusTCP Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for ModbusTCP.
+//
+//******************************************************************************
+
+// Sizes in bytes of the MBAP header and the Modbus PDU it carries.
+const mbapRecordSizeInBytes int = 7
+const modbusPDUMinimumRecordSizeInBytes int = 2
+const modbusPDUMaximumRecordSizeInBytes int = 253
+
+// ModbusProtocol models the 16-bit protocol identifier field of the
+// MBAP header.
+type ModbusProtocol uint16
+
+// ModbusProtocol known values.
+const (
+	// ModbusProtocolModbus (0) identifies the Modbus protocol itself.
+	ModbusProtocolModbus ModbusProtocol = 0
+)
+
+// String returns a human-readable name for the protocol identifier.
+func (mp ModbusProtocol) String() string {
+	if mp == ModbusProtocolModbus {
+		return "Modbus"
+	}
+	return "Unknown"
+}
+
+//******************************************************************************
+
+// ModbusTCP Type
+// --------
+// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object
+// represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP
+// payload in an ModbusTCP TCP packet.
+//
+type ModbusTCP struct {
+	BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes.
+
+	TransactionIdentifier uint16         // Identification of a MODBUS Request/Response transaction
+	ProtocolIdentifier    ModbusProtocol // It is used for intra-system multiplexing
+	Length                uint16         // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length)
+	UnitIdentifier        uint8          // Identification of a remote slave connected on a serial line or on other buses
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.
+func (d *ModbusTCP) LayerType() gopacket.LayerType {
+	return LayerTypeModbusTCP
+}
+
+//******************************************************************************
+
+// decodeModbusTCP analyses a byte slice and attempts to decode it as an ModbusTCP
+// record of a TCP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the ModbusTCP layer.
+func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &ModbusTCP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return p.NextDecoder(d.NextLayerType())
+
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an ModbusTCP
+// record of a TCP packet.
+//
+// Upon success, it loads the ModbusTCP object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a MBAP record, then return an error.
+	if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too short")
+	}
+
+	// An over-long packet is malformed rather than truncated, so it is
+	// not flagged as truncated.
+	if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {
+		return errors.New("ModbusTCP packet too long")
+	}
+
+	// ModbusTCP type embeds type BaseLayer which contains two fields:
+	// Contents is supposed to contain the bytes of the data at this level (MPBA).
+	// Payload is supposed to contain the payload of this level (PDU).
+	d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:]}
+
+	// Extract the fields from the block of bytes.
+	// The fields can just be copied in big endian order.
+	d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])
+	d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))
+	d.Length = binary.BigEndian.Uint16(data[4:6])
+
+	// Length should have the size of the payload plus one byte (size of UnitIdentifier)
+	if d.Length != uint16(len(d.BaseLayer.Payload)+1) {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet with wrong field value (Length)")
+	}
+	d.UnitIdentifier = data[6]
+
+	return nil
+}
+
+//******************************************************************************
+
+// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+//******************************************************************************
+
+// Payload returns the Modbus Protocol Data Unit (PDU) composed by Function Code
+// and Data; it is carried within ModbusTCP packets.
+func (d *ModbusTCP) Payload() []byte {
+	return d.BaseLayer.Payload
+}
diff --git a/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/google/gopacket/layers/mpls.go
new file mode 100644
index 0000000..83079a0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mpls.go
@@ -0,0 +1,87 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+// MPLS is the MPLS packet header.
+type MPLS struct {
+	BaseLayer
+	Label        uint32 // serialized as a 20-bit field
+	TrafficClass uint8  // serialized as a 3-bit field
+	StackBottom  bool   // bottom-of-stack flag (S bit)
+	TTL          uint8
+}
+
+// LayerType returns gopacket.LayerTypeMPLS.
+func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS }
+
+// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's
+// given, then decode the packet accordingly. Its algorithm for guessing is:
+// If the packet starts with byte 0x45-0x4F: IPv4
+// If the packet starts with byte 0x60-0x6F: IPv6
+// Otherwise: Error
+// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach.
+type ProtocolGuessingDecoder struct{}
+
+// Decode implements gopacket.Decoder by inspecting the first payload byte
+// and dispatching to the IPv4 or IPv6 decoder.
+func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error {
+	// Guard the data[0] access below against empty payloads.
+	if len(data) == 0 {
+		return errors.New("empty packet data, unable to guess protocol")
+	}
+	switch data[0] {
+	// 0x40 | header_len, where header_len is at least 5.
+	case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f:
+		return decodeIPv4(data, p)
+	// IPv6 can start with any byte whose first 4 bits are 0x6.
+	case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f:
+		return decodeIPv6(data, p)
+	}
+	return errors.New("Unable to guess protocol of packet data")
+}
+
+// MPLSPayloadDecoder is the decoder used to data encapsulated by each MPLS
+// layer. MPLS contains no type information, so we have to explicitly decide
+// which decoder to use. This is initially set to ProtocolGuessingDecoder, our
+// simple attempt at guessing protocols based on the first few bytes of data
+// available to us. However, if you know that in your environment MPLS always
+// encapsulates a specific protocol, you may reset this.
+var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{}
+
+// decodeMPLS decodes one 4-byte MPLS shim header. If the bottom-of-stack
+// bit is set, the payload is handed to MPLSPayloadDecoder; otherwise a
+// further MPLS header is expected to follow.
+func decodeMPLS(data []byte, p gopacket.PacketBuilder) error {
+	// Guard the fixed-size reads below against short input.
+	if len(data) < 4 {
+		p.SetTruncated()
+		return errors.New("MPLS header too small")
+	}
+	decoded := binary.BigEndian.Uint32(data[:4])
+	// Layout (MSB first): label(20) | traffic class(3) | S(1) | TTL(8).
+	mpls := &MPLS{
+		Label:        decoded >> 12,
+		TrafficClass: uint8(decoded>>9) & 0x7,
+		StackBottom:  decoded&0x100 != 0,
+		TTL:          uint8(decoded),
+		BaseLayer:    BaseLayer{data[:4], data[4:]},
+	}
+	p.AddLayer(mpls)
+	if mpls.StackBottom {
+		return p.NextDecoder(MPLSPayloadDecoder)
+	}
+	return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS))
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// The label field is only 20 bits wide; a larger value would silently
+	// corrupt the traffic class, stack-bottom and TTL bits when shifted.
+	if m.Label > 0xFFFFF {
+		return errors.New("MPLS label is larger than 20 bits")
+	}
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	// Pack label(20) | TC(3) | S(1) | TTL(8), most significant bits first.
+	encoded := m.Label << 12
+	encoded |= uint32(m.TrafficClass) << 9
+	encoded |= uint32(m.TTL)
+	if m.StackBottom {
+		encoded |= 0x100
+	}
+	binary.BigEndian.PutUint32(bytes, encoded)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/google/gopacket/layers/ndp.go
new file mode 100644
index 0000000..f7ca1b2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ndp.go
@@ -0,0 +1,611 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c
+
+package layers
+
+import (
+ "fmt"
+ "github.com/google/gopacket"
+ "net"
+)
+
+type NDPChassisType uint8
+
+// Nortel Chassis Types
+const (
+ NDPChassisother NDPChassisType = 1
+ NDPChassis3000 NDPChassisType = 2
+ NDPChassis3030 NDPChassisType = 3
+ NDPChassis2310 NDPChassisType = 4
+ NDPChassis2810 NDPChassisType = 5
+ NDPChassis2912 NDPChassisType = 6
+ NDPChassis2914 NDPChassisType = 7
+ NDPChassis271x NDPChassisType = 8
+ NDPChassis2813 NDPChassisType = 9
+ NDPChassis2814 NDPChassisType = 10
+ NDPChassis2915 NDPChassisType = 11
+ NDPChassis5000 NDPChassisType = 12
+ NDPChassis2813SA NDPChassisType = 13
+ NDPChassis2814SA NDPChassisType = 14
+ NDPChassis810M NDPChassisType = 15
+ NDPChassisEthercell NDPChassisType = 16
+ NDPChassis5005 NDPChassisType = 17
+ NDPChassisAlcatelEWC NDPChassisType = 18
+ NDPChassis2715SA NDPChassisType = 20
+ NDPChassis2486 NDPChassisType = 21
+ NDPChassis28000series NDPChassisType = 22
+ NDPChassis23000series NDPChassisType = 23
+ NDPChassis5DN00xseries NDPChassisType = 24
+ NDPChassisBayStackEthernet NDPChassisType = 25
+ NDPChassis23100series NDPChassisType = 26
+ NDPChassis100BaseTHub NDPChassisType = 27
+ NDPChassis3000FastEthernet NDPChassisType = 28
+ NDPChassisOrionSwitch NDPChassisType = 29
+ NDPChassisDDS NDPChassisType = 31
+ NDPChassisCentillion6slot NDPChassisType = 32
+ NDPChassisCentillion12slot NDPChassisType = 33
+ NDPChassisCentillion1slot NDPChassisType = 34
+ NDPChassisBayStack301 NDPChassisType = 35
+ NDPChassisBayStackTokenRingHub NDPChassisType = 36
+ NDPChassisFVCMultimediaSwitch NDPChassisType = 37
+ NDPChassisSwitchNode NDPChassisType = 38
+ NDPChassisBayStack302Switch NDPChassisType = 39
+ NDPChassisBayStack350Switch NDPChassisType = 40
+ NDPChassisBayStack150EthernetHub NDPChassisType = 41
+ NDPChassisCentillion50NSwitch NDPChassisType = 42
+ NDPChassisCentillion50TSwitch NDPChassisType = 43
+ NDPChassisBayStack303304Switches NDPChassisType = 44
+ NDPChassisBayStack200EthernetHub NDPChassisType = 45
+ NDPChassisBayStack25010100EthernetHub NDPChassisType = 46
+ NDPChassisBayStack450101001000Switches NDPChassisType = 48
+ NDPChassisBayStack41010100Switches NDPChassisType = 49
+ NDPChassisPassport1200L3Switch NDPChassisType = 50
+ NDPChassisPassport1250L3Switch NDPChassisType = 51
+ NDPChassisPassport1100L3Switch NDPChassisType = 52
+ NDPChassisPassport1150L3Switch NDPChassisType = 53
+ NDPChassisPassport1050L3Switch NDPChassisType = 54
+ NDPChassisPassport1051L3Switch NDPChassisType = 55
+ NDPChassisPassport8610L3Switch NDPChassisType = 56
+ NDPChassisPassport8606L3Switch NDPChassisType = 57
+ NDPChassisPassport8010 NDPChassisType = 58
+ NDPChassisPassport8006 NDPChassisType = 59
+ NDPChassisBayStack670wirelessaccesspoint NDPChassisType = 60
+ NDPChassisPassport740 NDPChassisType = 61
+ NDPChassisPassport750 NDPChassisType = 62
+ NDPChassisPassport790 NDPChassisType = 63
+ NDPChassisBusinessPolicySwitch200010100Switches NDPChassisType = 64
+ NDPChassisPassport8110L2Switch NDPChassisType = 65
+ NDPChassisPassport8106L2Switch NDPChassisType = 66
+ NDPChassisBayStack3580GigSwitch NDPChassisType = 67
+ NDPChassisBayStack10PowerSupplyUnit NDPChassisType = 68
+ NDPChassisBayStack42010100Switch NDPChassisType = 69
+ NDPChassisOPTeraMetro1200EthernetServiceModule NDPChassisType = 70
+ NDPChassisOPTera8010co NDPChassisType = 71
+ NDPChassisOPTera8610coL3Switch NDPChassisType = 72
+ NDPChassisOPTera8110coL2Switch NDPChassisType = 73
+ NDPChassisOPTera8003 NDPChassisType = 74
+ NDPChassisOPTera8603L3Switch NDPChassisType = 75
+ NDPChassisOPTera8103L2Switch NDPChassisType = 76
+ NDPChassisBayStack380101001000Switch NDPChassisType = 77
+ NDPChassisEthernetSwitch47048T NDPChassisType = 78
+ NDPChassisOPTeraMetro1450EthernetServiceModule NDPChassisType = 79
+ NDPChassisOPTeraMetro1400EthernetServiceModule NDPChassisType = 80
+ NDPChassisAlteonSwitchFamily NDPChassisType = 81
+ NDPChassisEthernetSwitch46024TPWR NDPChassisType = 82
+ NDPChassisOPTeraMetro8010OPML2Switch NDPChassisType = 83
+ NDPChassisOPTeraMetro8010coOPML2Switch NDPChassisType = 84
+ NDPChassisOPTeraMetro8006OPML2Switch NDPChassisType = 85
+ NDPChassisOPTeraMetro8003OPML2Switch NDPChassisType = 86
+ NDPChassisAlteon180e NDPChassisType = 87
+ NDPChassisAlteonAD3 NDPChassisType = 88
+ NDPChassisAlteon184 NDPChassisType = 89
+ NDPChassisAlteonAD4 NDPChassisType = 90
+ NDPChassisPassport1424L3Switch NDPChassisType = 91
+ NDPChassisPassport1648L3Switch NDPChassisType = 92
+ NDPChassisPassport1612L3Switch NDPChassisType = 93
+ NDPChassisPassport1624L3Switch NDPChassisType = 94
+ NDPChassisBayStack38024FFiber1000Switch NDPChassisType = 95
+ NDPChassisEthernetRoutingSwitch551024T NDPChassisType = 96
+ NDPChassisEthernetRoutingSwitch551048T NDPChassisType = 97
+ NDPChassisEthernetSwitch47024T NDPChassisType = 98
+ NDPChassisNortelNetworksWirelessLANAccessPoint2220 NDPChassisType = 99
+ NDPChassisPassportRBS2402L3Switch NDPChassisType = 100
+ NDPChassisAlteonApplicationSwitch2424 NDPChassisType = 101
+ NDPChassisAlteonApplicationSwitch2224 NDPChassisType = 102
+ NDPChassisAlteonApplicationSwitch2208 NDPChassisType = 103
+ NDPChassisAlteonApplicationSwitch2216 NDPChassisType = 104
+ NDPChassisAlteonApplicationSwitch3408 NDPChassisType = 105
+ NDPChassisAlteonApplicationSwitch3416 NDPChassisType = 106
+ NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107
+ NDPChassisEthernetSwitch42548T NDPChassisType = 108
+ NDPChassisEthernetSwitch42524T NDPChassisType = 109
+ NDPChassisNortelNetworksWirelessLANAccessPoint2221 NDPChassisType = 110
+ NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch NDPChassisType = 111
+ NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112
+ NDPChassisPassport830010slotchassis NDPChassisType = 113
+ NDPChassisPassport83006slotchassis NDPChassisType = 114
+ NDPChassisEthernetRoutingSwitch552024TPWR NDPChassisType = 115
+ NDPChassisEthernetRoutingSwitch552048TPWR NDPChassisType = 116
+ NDPChassisNortelNetworksVPNGateway3050 NDPChassisType = 117
+ NDPChassisAlteonSSL31010100 NDPChassisType = 118
+ NDPChassisAlteonSSL31010100Fiber NDPChassisType = 119
+ NDPChassisAlteonSSL31010100FIPS NDPChassisType = 120
+ NDPChassisAlteonSSL410101001000 NDPChassisType = 121
+ NDPChassisAlteonSSL410101001000Fiber NDPChassisType = 122
+ NDPChassisAlteonApplicationSwitch2424SSL NDPChassisType = 123
+ NDPChassisEthernetSwitch32524T NDPChassisType = 124
+ NDPChassisEthernetSwitch32524G NDPChassisType = 125
+ NDPChassisNortelNetworksWirelessLANAccessPoint2225 NDPChassisType = 126
+ NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127
+ NDPChassis24portEthernetSwitch47024TPWR NDPChassisType = 128
+ NDPChassis48portEthernetSwitch47048TPWR NDPChassisType = 129
+ NDPChassisEthernetRoutingSwitch553024TFD NDPChassisType = 130
+ NDPChassisEthernetSwitch351024T NDPChassisType = 131
+ NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132
+ NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133
+ NDPChassisNortelSecureAccessSwitch NDPChassisType = 134
+ NDPChassisNortelNetworksVPNGateway3070 NDPChassisType = 135
+ NDPChassisOPTeraMetro3500 NDPChassisType = 136
+ NDPChassisSMBBES101024T NDPChassisType = 137
+ NDPChassisSMBBES101048T NDPChassisType = 138
+ NDPChassisSMBBES102024TPWR NDPChassisType = 139
+ NDPChassisSMBBES102048TPWR NDPChassisType = 140
+ NDPChassisSMBBES201024T NDPChassisType = 141
+ NDPChassisSMBBES201048T NDPChassisType = 142
+ NDPChassisSMBBES202024TPWR NDPChassisType = 143
+ NDPChassisSMBBES202048TPWR NDPChassisType = 144
+ NDPChassisSMBBES11024T NDPChassisType = 145
+ NDPChassisSMBBES11048T NDPChassisType = 146
+ NDPChassisSMBBES12024TPWR NDPChassisType = 147
+ NDPChassisSMBBES12048TPWR NDPChassisType = 148
+ NDPChassisSMBBES21024T NDPChassisType = 149
+ NDPChassisSMBBES21048T NDPChassisType = 150
+ NDPChassisSMBBES22024TPWR NDPChassisType = 151
+ NDPChassisSMBBES22048TPWR NDPChassisType = 152
+ NDPChassisOME6500 NDPChassisType = 153
+ NDPChassisEthernetRoutingSwitch4548GT NDPChassisType = 154
+ NDPChassisEthernetRoutingSwitch4548GTPWR NDPChassisType = 155
+ NDPChassisEthernetRoutingSwitch4550T NDPChassisType = 156
+ NDPChassisEthernetRoutingSwitch4550TPWR NDPChassisType = 157
+ NDPChassisEthernetRoutingSwitch4526FX NDPChassisType = 158
+ NDPChassisEthernetRoutingSwitch250026T NDPChassisType = 159
+ NDPChassisEthernetRoutingSwitch250026TPWR NDPChassisType = 160
+ NDPChassisEthernetRoutingSwitch250050T NDPChassisType = 161
+ NDPChassisEthernetRoutingSwitch250050TPWR NDPChassisType = 162
+)
+
+// NDPBackplaneType identifies the backplane/media technology reported in an
+// NDP record (decoded from byte 8 of the packet).
+type NDPBackplaneType uint8
+
+// Nortel Backplane Types
+const (
+	NDPBackplaneOther                                       NDPBackplaneType = 1
+	NDPBackplaneEthernet                                    NDPBackplaneType = 2
+	NDPBackplaneEthernetTokenring                           NDPBackplaneType = 3
+	NDPBackplaneEthernetFDDI                                NDPBackplaneType = 4
+	NDPBackplaneEthernetTokenringFDDI                       NDPBackplaneType = 5
+	NDPBackplaneEthernetTokenringRedundantPower             NDPBackplaneType = 6
+	NDPBackplaneEthernetTokenringFDDIRedundantPower         NDPBackplaneType = 7
+	NDPBackplaneTokenRing                                   NDPBackplaneType = 8
+	NDPBackplaneEthernetTokenringFastEthernet               NDPBackplaneType = 9
+	NDPBackplaneEthernetFastEthernet                        NDPBackplaneType = 10
+	NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11
+	NDPBackplaneEthernetFastEthernetGigabitEthernet         NDPBackplaneType = 12
+)
+
+// NDPState describes why a device sent an NDP packet (decoded from byte 9 of
+// the packet).
+type NDPState uint8
+
+// Device State
+const (
+	NDPStateTopology  NDPState = 1
+	NDPStateHeartbeat NDPState = 2
+	NDPStateNew       NDPState = 3
+)
+
+// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol.
+// Field offsets follow the 11-byte wire record parsed by decodeNortelDiscovery.
+type NortelDiscovery struct {
+	BaseLayer
+	IPAddress net.IP           // bytes 0-3; presumably the sender's IPv4 address — confirm against NDP spec
+	SegmentID []byte           // bytes 4-6; 3-byte segment identifier
+	Chassis   NDPChassisType   // byte 7; chassis model code
+	Backplane NDPBackplaneType // byte 8; backplane/media type code
+	State     NDPState         // byte 9; device state (topology change / heartbeat / new)
+	NumLinks  uint8            // byte 10; link count reported by the device
+}
+
+// LayerType returns LayerTypeNortelDiscovery, implementing gopacket.Layer.
+func (c *NortelDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeNortelDiscovery
+}
+
+// decodeNortelDiscovery parses an 11-byte NDP record from data and adds the
+// resulting NortelDiscovery layer to the packet.
+func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	// Validate the fixed record length before touching any byte offsets.
+	if len(data) < 11 {
+		return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data))
+	}
+	layer := &NortelDiscovery{
+		IPAddress: data[0:4],
+		SegmentID: data[4:7],
+		Chassis:   NDPChassisType(data[7]),
+		Backplane: NDPBackplaneType(data[8]),
+		State:     NDPState(data[9]),
+		NumLinks:  data[10],
+	}
+	p.AddLayer(layer)
+	return nil
+}
+
+func (t NDPChassisType) String() (s string) {
+ switch t {
+ case NDPChassisother:
+ s = "other"
+ case NDPChassis3000:
+ s = "3000"
+ case NDPChassis3030:
+ s = "3030"
+ case NDPChassis2310:
+ s = "2310"
+ case NDPChassis2810:
+ s = "2810"
+ case NDPChassis2912:
+ s = "2912"
+ case NDPChassis2914:
+ s = "2914"
+ case NDPChassis271x:
+ s = "271x"
+ case NDPChassis2813:
+ s = "2813"
+ case NDPChassis2814:
+ s = "2814"
+ case NDPChassis2915:
+ s = "2915"
+ case NDPChassis5000:
+ s = "5000"
+ case NDPChassis2813SA:
+ s = "2813SA"
+ case NDPChassis2814SA:
+ s = "2814SA"
+ case NDPChassis810M:
+ s = "810M"
+ case NDPChassisEthercell:
+ s = "Ethercell"
+ case NDPChassis5005:
+ s = "5005"
+ case NDPChassisAlcatelEWC:
+ s = "Alcatel Ethernet workgroup conc."
+ case NDPChassis2715SA:
+ s = "2715SA"
+ case NDPChassis2486:
+ s = "2486"
+ case NDPChassis28000series:
+ s = "28000 series"
+ case NDPChassis23000series:
+ s = "23000 series"
+ case NDPChassis5DN00xseries:
+ s = "5DN00x series"
+ case NDPChassisBayStackEthernet:
+ s = "BayStack Ethernet"
+ case NDPChassis23100series:
+ s = "23100 series"
+ case NDPChassis100BaseTHub:
+ s = "100Base-T Hub"
+ case NDPChassis3000FastEthernet:
+ s = "3000 Fast Ethernet"
+ case NDPChassisOrionSwitch:
+ s = "Orion switch"
+ case NDPChassisDDS:
+ s = "DDS"
+ case NDPChassisCentillion6slot:
+ s = "Centillion (6 slot)"
+ case NDPChassisCentillion12slot:
+ s = "Centillion (12 slot)"
+ case NDPChassisCentillion1slot:
+ s = "Centillion (1 slot)"
+ case NDPChassisBayStack301:
+ s = "BayStack 301"
+ case NDPChassisBayStackTokenRingHub:
+ s = "BayStack TokenRing Hub"
+ case NDPChassisFVCMultimediaSwitch:
+ s = "FVC Multimedia Switch"
+ case NDPChassisSwitchNode:
+ s = "Switch Node"
+ case NDPChassisBayStack302Switch:
+ s = "BayStack 302 Switch"
+ case NDPChassisBayStack350Switch:
+ s = "BayStack 350 Switch"
+ case NDPChassisBayStack150EthernetHub:
+ s = "BayStack 150 Ethernet Hub"
+ case NDPChassisCentillion50NSwitch:
+ s = "Centillion 50N switch"
+ case NDPChassisCentillion50TSwitch:
+ s = "Centillion 50T switch"
+ case NDPChassisBayStack303304Switches:
+ s = "BayStack 303 and 304 Switches"
+ case NDPChassisBayStack200EthernetHub:
+ s = "BayStack 200 Ethernet Hub"
+ case NDPChassisBayStack25010100EthernetHub:
+ s = "BayStack 250 10/100 Ethernet Hub"
+ case NDPChassisBayStack450101001000Switches:
+ s = "BayStack 450 10/100/1000 Switches"
+ case NDPChassisBayStack41010100Switches:
+ s = "BayStack 410 10/100 Switches"
+ case NDPChassisPassport1200L3Switch:
+ s = "Passport 1200 L3 Switch"
+ case NDPChassisPassport1250L3Switch:
+ s = "Passport 1250 L3 Switch"
+ case NDPChassisPassport1100L3Switch:
+ s = "Passport 1100 L3 Switch"
+ case NDPChassisPassport1150L3Switch:
+ s = "Passport 1150 L3 Switch"
+ case NDPChassisPassport1050L3Switch:
+ s = "Passport 1050 L3 Switch"
+ case NDPChassisPassport1051L3Switch:
+ s = "Passport 1051 L3 Switch"
+ case NDPChassisPassport8610L3Switch:
+ s = "Passport 8610 L3 Switch"
+ case NDPChassisPassport8606L3Switch:
+ s = "Passport 8606 L3 Switch"
+ case NDPChassisPassport8010:
+ s = "Passport 8010"
+ case NDPChassisPassport8006:
+ s = "Passport 8006"
+ case NDPChassisBayStack670wirelessaccesspoint:
+ s = "BayStack 670 wireless access point"
+ case NDPChassisPassport740:
+ s = "Passport 740"
+ case NDPChassisPassport750:
+ s = "Passport 750"
+ case NDPChassisPassport790:
+ s = "Passport 790"
+ case NDPChassisBusinessPolicySwitch200010100Switches:
+ s = "Business Policy Switch 2000 10/100 Switches"
+ case NDPChassisPassport8110L2Switch:
+ s = "Passport 8110 L2 Switch"
+ case NDPChassisPassport8106L2Switch:
+ s = "Passport 8106 L2 Switch"
+ case NDPChassisBayStack3580GigSwitch:
+ s = "BayStack 3580 Gig Switch"
+ case NDPChassisBayStack10PowerSupplyUnit:
+ s = "BayStack 10 Power Supply Unit"
+ case NDPChassisBayStack42010100Switch:
+ s = "BayStack 420 10/100 Switch"
+ case NDPChassisOPTeraMetro1200EthernetServiceModule:
+ s = "OPTera Metro 1200 Ethernet Service Module"
+ case NDPChassisOPTera8010co:
+ s = "OPTera 8010co"
+ case NDPChassisOPTera8610coL3Switch:
+ s = "OPTera 8610co L3 switch"
+ case NDPChassisOPTera8110coL2Switch:
+ s = "OPTera 8110co L2 switch"
+ case NDPChassisOPTera8003:
+ s = "OPTera 8003"
+ case NDPChassisOPTera8603L3Switch:
+ s = "OPTera 8603 L3 switch"
+ case NDPChassisOPTera8103L2Switch:
+ s = "OPTera 8103 L2 switch"
+ case NDPChassisBayStack380101001000Switch:
+ s = "BayStack 380 10/100/1000 Switch"
+ case NDPChassisEthernetSwitch47048T:
+ s = "Ethernet Switch 470-48T"
+ case NDPChassisOPTeraMetro1450EthernetServiceModule:
+ s = "OPTera Metro 1450 Ethernet Service Module"
+ case NDPChassisOPTeraMetro1400EthernetServiceModule:
+ s = "OPTera Metro 1400 Ethernet Service Module"
+ case NDPChassisAlteonSwitchFamily:
+ s = "Alteon Switch Family"
+ case NDPChassisEthernetSwitch46024TPWR:
+ s = "Ethernet Switch 460-24T-PWR"
+ case NDPChassisOPTeraMetro8010OPML2Switch:
+ s = "OPTera Metro 8010 OPM L2 Switch"
+ case NDPChassisOPTeraMetro8010coOPML2Switch:
+ s = "OPTera Metro 8010co OPM L2 Switch"
+ case NDPChassisOPTeraMetro8006OPML2Switch:
+ s = "OPTera Metro 8006 OPM L2 Switch"
+ case NDPChassisOPTeraMetro8003OPML2Switch:
+ s = "OPTera Metro 8003 OPM L2 Switch"
+ case NDPChassisAlteon180e:
+ s = "Alteon 180e"
+ case NDPChassisAlteonAD3:
+ s = "Alteon AD3"
+ case NDPChassisAlteon184:
+ s = "Alteon 184"
+ case NDPChassisAlteonAD4:
+ s = "Alteon AD4"
+ case NDPChassisPassport1424L3Switch:
+ s = "Passport 1424 L3 switch"
+ case NDPChassisPassport1648L3Switch:
+ s = "Passport 1648 L3 switch"
+ case NDPChassisPassport1612L3Switch:
+ s = "Passport 1612 L3 switch"
+ case NDPChassisPassport1624L3Switch:
+ s = "Passport 1624 L3 switch"
+ case NDPChassisBayStack38024FFiber1000Switch:
+ s = "BayStack 380-24F Fiber 1000 Switch"
+ case NDPChassisEthernetRoutingSwitch551024T:
+ s = "Ethernet Routing Switch 5510-24T"
+ case NDPChassisEthernetRoutingSwitch551048T:
+ s = "Ethernet Routing Switch 5510-48T"
+ case NDPChassisEthernetSwitch47024T:
+ s = "Ethernet Switch 470-24T"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2220:
+ s = "Nortel Networks Wireless LAN Access Point 2220"
+ case NDPChassisPassportRBS2402L3Switch:
+ s = "Passport RBS 2402 L3 switch"
+ case NDPChassisAlteonApplicationSwitch2424:
+ s = "Alteon Application Switch 2424"
+ case NDPChassisAlteonApplicationSwitch2224:
+ s = "Alteon Application Switch 2224"
+ case NDPChassisAlteonApplicationSwitch2208:
+ s = "Alteon Application Switch 2208"
+ case NDPChassisAlteonApplicationSwitch2216:
+ s = "Alteon Application Switch 2216"
+ case NDPChassisAlteonApplicationSwitch3408:
+ s = "Alteon Application Switch 3408"
+ case NDPChassisAlteonApplicationSwitch3416:
+ s = "Alteon Application Switch 3416"
+ case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250:
+ s = "Nortel Networks Wireless LAN SecuritySwitch 2250"
+ case NDPChassisEthernetSwitch42548T:
+ s = "Ethernet Switch 425-48T"
+ case NDPChassisEthernetSwitch42524T:
+ s = "Ethernet Switch 425-24T"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2221:
+ s = "Nortel Networks Wireless LAN Access Point 2221"
+ case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch:
+ s = "Nortel Metro Ethernet Service Unit 24-T SPF switch"
+ case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch:
+ s = " Nortel Metro Ethernet Service Unit 24-T LX DC switch"
+ case NDPChassisPassport830010slotchassis:
+ s = "Passport 8300 10-slot chassis"
+ case NDPChassisPassport83006slotchassis:
+ s = "Passport 8300 6-slot chassis"
+ case NDPChassisEthernetRoutingSwitch552024TPWR:
+ s = "Ethernet Routing Switch 5520-24T-PWR"
+ case NDPChassisEthernetRoutingSwitch552048TPWR:
+ s = "Ethernet Routing Switch 5520-48T-PWR"
+ case NDPChassisNortelNetworksVPNGateway3050:
+ s = "Nortel Networks VPN Gateway 3050"
+ case NDPChassisAlteonSSL31010100:
+ s = "Alteon SSL 310 10/100"
+ case NDPChassisAlteonSSL31010100Fiber:
+ s = "Alteon SSL 310 10/100 Fiber"
+ case NDPChassisAlteonSSL31010100FIPS:
+ s = "Alteon SSL 310 10/100 FIPS"
+ case NDPChassisAlteonSSL410101001000:
+ s = "Alteon SSL 410 10/100/1000"
+ case NDPChassisAlteonSSL410101001000Fiber:
+ s = "Alteon SSL 410 10/100/1000 Fiber"
+ case NDPChassisAlteonApplicationSwitch2424SSL:
+ s = "Alteon Application Switch 2424-SSL"
+ case NDPChassisEthernetSwitch32524T:
+ s = "Ethernet Switch 325-24T"
+ case NDPChassisEthernetSwitch32524G:
+ s = "Ethernet Switch 325-24G"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2225:
+ s = "Nortel Networks Wireless LAN Access Point 2225"
+ case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270:
+ s = "Nortel Networks Wireless LAN SecuritySwitch 2270"
+ case NDPChassis24portEthernetSwitch47024TPWR:
+ s = "24-port Ethernet Switch 470-24T-PWR"
+ case NDPChassis48portEthernetSwitch47048TPWR:
+ s = "48-port Ethernet Switch 470-48T-PWR"
+ case NDPChassisEthernetRoutingSwitch553024TFD:
+ s = "Ethernet Routing Switch 5530-24TFD"
+ case NDPChassisEthernetSwitch351024T:
+ s = "Ethernet Switch 3510-24T"
+ case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch:
+ s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch"
+ case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch:
+ s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch"
+ case NDPChassisNortelSecureAccessSwitch:
+ s = "Nortel Secure Access Switch"
+ case NDPChassisNortelNetworksVPNGateway3070:
+ s = "Nortel Networks VPN Gateway 3070"
+ case NDPChassisOPTeraMetro3500:
+ s = "OPTera Metro 3500"
+ case NDPChassisSMBBES101024T:
+ s = "SMB BES 1010 24T"
+ case NDPChassisSMBBES101048T:
+ s = "SMB BES 1010 48T"
+ case NDPChassisSMBBES102024TPWR:
+ s = "SMB BES 1020 24T PWR"
+ case NDPChassisSMBBES102048TPWR:
+ s = "SMB BES 1020 48T PWR"
+ case NDPChassisSMBBES201024T:
+ s = "SMB BES 2010 24T"
+ case NDPChassisSMBBES201048T:
+ s = "SMB BES 2010 48T"
+ case NDPChassisSMBBES202024TPWR:
+ s = "SMB BES 2020 24T PWR"
+ case NDPChassisSMBBES202048TPWR:
+ s = "SMB BES 2020 48T PWR"
+ case NDPChassisSMBBES11024T:
+ s = "SMB BES 110 24T"
+ case NDPChassisSMBBES11048T:
+ s = "SMB BES 110 48T"
+ case NDPChassisSMBBES12024TPWR:
+ s = "SMB BES 120 24T PWR"
+ case NDPChassisSMBBES12048TPWR:
+ s = "SMB BES 120 48T PWR"
+ case NDPChassisSMBBES21024T:
+ s = "SMB BES 210 24T"
+ case NDPChassisSMBBES21048T:
+ s = "SMB BES 210 48T"
+ case NDPChassisSMBBES22024TPWR:
+ s = "SMB BES 220 24T PWR"
+ case NDPChassisSMBBES22048TPWR:
+ s = "SMB BES 220 48T PWR"
+ case NDPChassisOME6500:
+ s = "OME 6500"
+ case NDPChassisEthernetRoutingSwitch4548GT:
+ s = "Ethernet Routing Switch 4548GT"
+ case NDPChassisEthernetRoutingSwitch4548GTPWR:
+ s = "Ethernet Routing Switch 4548GT-PWR"
+ case NDPChassisEthernetRoutingSwitch4550T:
+ s = "Ethernet Routing Switch 4550T"
+ case NDPChassisEthernetRoutingSwitch4550TPWR:
+ s = "Ethernet Routing Switch 4550T-PWR"
+ case NDPChassisEthernetRoutingSwitch4526FX:
+ s = "Ethernet Routing Switch 4526FX"
+ case NDPChassisEthernetRoutingSwitch250026T:
+ s = "Ethernet Routing Switch 2500-26T"
+ case NDPChassisEthernetRoutingSwitch250026TPWR:
+ s = "Ethernet Routing Switch 2500-26T-PWR"
+ case NDPChassisEthernetRoutingSwitch250050T:
+ s = "Ethernet Routing Switch 2500-50T"
+ case NDPChassisEthernetRoutingSwitch250050TPWR:
+ s = "Ethernet Routing Switch 2500-50T-PWR"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+// String returns a human-readable description of the backplane type, or
+// "Unknown" for values outside the defined set.
+func (t NDPBackplaneType) String() string {
+	names := map[NDPBackplaneType]string{
+		NDPBackplaneOther:                                       "Other",
+		NDPBackplaneEthernet:                                    "Ethernet",
+		NDPBackplaneEthernetTokenring:                           "Ethernet and Tokenring",
+		NDPBackplaneEthernetFDDI:                                "Ethernet and FDDI",
+		NDPBackplaneEthernetTokenringFDDI:                       "Ethernet, Tokenring and FDDI",
+		NDPBackplaneEthernetTokenringRedundantPower:             "Ethernet and Tokenring with redundant power",
+		NDPBackplaneEthernetTokenringFDDIRedundantPower:         "Ethernet, Tokenring, FDDI with redundant power",
+		NDPBackplaneTokenRing:                                   "Token Ring",
+		NDPBackplaneEthernetTokenringFastEthernet:               "Ethernet, Tokenring and Fast Ethernet",
+		NDPBackplaneEthernetFastEthernet:                        "Ethernet and Fast Ethernet",
+		NDPBackplaneEthernetTokenringFastEthernetRedundantPower: "Ethernet, Tokenring, Fast Ethernet with redundant power",
+		NDPBackplaneEthernetFastEthernetGigabitEthernet:         "Ethernet, Fast Ethernet and Gigabit Ethernet",
+	}
+	if name, ok := names[t]; ok {
+		return name
+	}
+	return "Unknown"
+}
+
+// String returns a human-readable description of the device state, or
+// "Unknown" for values outside the defined set.
+func (t NDPState) String() string {
+	names := map[NDPState]string{
+		NDPStateTopology:  "Topology Change",
+		NDPStateHeartbeat: "Heartbeat",
+		NDPStateNew:       "New",
+	}
+	if name, ok := names[t]; ok {
+		return name
+	}
+	return "Unknown"
+}
diff --git a/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/google/gopacket/layers/ntp.go
new file mode 100644
index 0000000..33c15b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ntp.go
@@ -0,0 +1,416 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// Network Time Protocol (NTP) Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for NTP.
+//
+//******************************************************************************
+//
+// About The Network Time Protocol (NTP)
+// -------------------------------------
+// NTP is a protocol that enables computers on the internet to set their
+// clocks to the correct time (or to a time that is acceptably close to the
+// correct time). NTP runs on top of UDP.
+//
+// There have been a series of versions of the NTP protocol. The latest
+// version is V4 and is specified in RFC 5905:
+// http://www.ietf.org/rfc/rfc5905.txt
+//
+//******************************************************************************
+//
+// References
+// ----------
+//
+// Wikipedia's NTP entry:
+// https://en.wikipedia.org/wiki/Network_Time_Protocol
+// This is the best place to get an overview of NTP.
+//
+// Network Time Protocol Home Website:
+// http://www.ntp.org/
+// This appears to be the official website of NTP.
+//
+// List of current NTP Protocol RFCs:
+// http://www.ntp.org/rfc.html
+//
+// RFC 958: "Network Time Protocol (NTP)" (1985)
+// https://tools.ietf.org/html/rfc958
+// This is the original NTP specification.
+//
+// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992)
+// https://tools.ietf.org/html/rfc1305
+// The protocol was updated in 1992 yielding NTP V3.
+//
+// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010)
+// https://www.ietf.org/rfc/rfc5905.txt
+// The protocol was updated in 2010 yielding NTP V4.
+// V4 is backwards compatible with all previous versions of NTP.
+//
+// RFC 5906: "Network Time Protocol Version 4: Autokey Specification"
+// https://tools.ietf.org/html/rfc5906
+// This document addresses the security of the NTP protocol
+// and is probably not relevant to this package.
+//
+// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)"
+// https://tools.ietf.org/html/rfc5907
+// This document addresses the management of NTP servers and
+// is probably not relevant to this package.
+//
+// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6"
+// https://tools.ietf.org/html/rfc5908
+// This document addresses the use of NTP in DHCPv6 and is
+// probably not relevant to this package.
+//
+// "Let's make a NTP Client in C"
+// https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html
+// This web page contains useful information about the details of NTP,
+// including an NTP record structure in C, and C code.
+//
+// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)"
+// http://what-when-how.com/computer-network-time-synchronization/
+// ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/
+// This web page contains useful information on the details of NTP.
+//
+// "Technical information - NTP Data Packet"
+// https://www.meinbergglobal.com/english/info/ntp-packet.htm
+// This page has a helpful diagram of an NTP V4 packet.
+//
+//******************************************************************************
+//
+// Obsolete References
+// -------------------
+//
+// RFC 1119: "Network Time Protocol (Version 2) Specification and Implementation" (1989)
+// https://tools.ietf.org/html/rfc1119
+// Version 2 was drafted in 1989.
+// It is unclear whether V2 was ever implemented or whether the
+// ideas ended up in V3 (which was implemented in 1992).
+//
+// RFC 1361: "Simple Network Time Protocol (SNTP)"
+// https://tools.ietf.org/html/rfc1361
+// This document is obsoleted by RFC 1769 and is included only for completeness.
+//
+// RFC 1769: "Simple Network Time Protocol (SNTP)"
+// https://tools.ietf.org/html/rfc1769
+// This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness.
+//
+// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+// https://tools.ietf.org/html/rfc2030
+// This document is obsoleted by RFC 4330 and is included only for completeness.
+//
+// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+// https://tools.ietf.org/html/rfc4330
+// This document is obsoleted by RFC 5905 and is included only for completeness.
+//
+//******************************************************************************
+//
+// Endian And Bit Numbering Issues
+// -------------------------------
+//
+// Endian and bit numbering issues can be confusing. Here is some
+// clarification:
+//
+// ENDIAN: Values are sent big endian.
+// https://en.wikipedia.org/wiki/Endianness
+//
+// BIT NUMBERING: Bits are numbered 0 upwards from the most significant
+// bit to the least significant bit. This means that if there is a 32-bit
+// value, the most significant bit is called bit 0 and the least
+// significant bit is called bit 31.
+//
+// See RFC 791 Appendix B for more discussion.
+//
+//******************************************************************************
+//
+// NTP V3 and V4 Packet Format
+// ---------------------------
+// NTP packets are UDP packets whose payload contains an NTP record.
+//
+// The NTP RFC defines the format of the NTP record.
+//
+// There have been four versions of the protocol:
+//
+// V1 in 1985
+// V2 in 1989
+// V3 in 1992
+// V4 in 2010
+//
+// It is clear that V1 and V2 are obsolete, and there is no need to
+// cater for these formats.
+//
+// V3 and V4 essentially use the same format, with V4 adding some optional
+// fields on the end. So this package supports the V3 and V4 formats.
+//
+// The current version of NTP (NTP V4)'s RFC (V4 - RFC 5905) contains
+// the following diagram for the NTP record format:
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |LI | VN |Mode | Stratum | Poll | Precision |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Delay |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Dispersion |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Reference ID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Reference Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Origin Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Receive Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Transmit Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 1 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 2 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Key Identifier |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// | dgst (128) |
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// From http://www.ietf.org/rfc/rfc5905.txt
+//
+// The fields "Extension Field 1 (variable)" and later are optional fields,
+// and so we can set a minimum NTP record size of 48 bytes.
+//
+const ntpMinimumRecordSizeInBytes int = 48
+
+//******************************************************************************
+
+// NTP Type
+// --------
+// Type NTP implements the DecodingLayer interface. Each NTP object
+// represents in a structured form the NTP record present as the UDP
+// payload in an NTP UDP packet.
+//
+
+type NTPLeapIndicator uint8
+type NTPVersion uint8
+type NTPMode uint8
+type NTPStratum uint8
+type NTPLog2Seconds int8
+type NTPFixed16Seconds uint32
+type NTPReferenceID uint32
+type NTPTimestamp uint64
+
+// NTP represents, in structured form, the NTP record carried as the UDP
+// payload of an NTP packet. It implements the DecodingLayer interface.
+type NTP struct {
+ BaseLayer // Stores the packet bytes and payload bytes.
+
+ LeapIndicator NTPLeapIndicator // [0,3]. Indicates whether leap second(s) is to be added.
+ Version NTPVersion // [0,7]. Version of the NTP protocol.
+ Mode NTPMode // [0,7]. Mode.
+ Stratum NTPStratum // [0,255]. Stratum of time server in the server tree.
+ Poll NTPLog2Seconds // [-128,127]. The maximum interval between successive messages, in log2 seconds.
+ Precision NTPLog2Seconds // [-128,127]. The precision of the system clock, in log2 seconds.
+ RootDelay NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16.
+ RootDispersion NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16.
+ ReferenceID NTPReferenceID // ID code of reference clock [0,2^32-1].
+ ReferenceTimestamp NTPTimestamp // Most recent timestamp from the reference clock.
+ OriginTimestamp NTPTimestamp // Local time when request was sent from local host.
+ ReceiveTimestamp NTPTimestamp // Local time (on server) that request arrived at server host.
+ TransmitTimestamp NTPTimestamp // Local time (on server) that request departed server host.
+
+ // FIX: This package should analyse the extension fields and represent the extension fields too.
+ ExtensionBytes []byte // Just put extensions in a byte slice.
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the NTP object, which is LayerTypeNTP.
+func (d *NTP) LayerType() gopacket.LayerType {
+ return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// decodeNTP analyses a byte slice and attempts to decode it as the NTP
+// record of a UDP packet.
+//
+// On success the decoded layer is added to the packet (and registered as
+// the application layer) and nil is returned; on failure the decode error
+// is returned unchanged.
+//
+// This function is employed in layertypes.go to register the NTP layer.
+func decodeNTP(data []byte, p gopacket.PacketBuilder) error {
+ // Try to decode the byte slice; bail out on the first problem.
+ ntp := &NTP{}
+ if err := ntp.DecodeFromBytes(data, p); err != nil {
+  return err
+ }
+
+ // Decoding worked: attach the layer to the packet and also make it the
+ // application layer, if there isn't already one.
+ p.AddLayer(ntp)
+ p.SetApplicationLayer(ntp)
+ return nil
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an
+// NTP record carried in a UDP packet.
+//
+// On success the NTP object is populated from the packet bytes and nil is
+// returned; on failure an error (non nil) is returned.
+func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // Anything shorter than the 48-byte fixed header cannot be an NTP record.
+ if len(data) < ntpMinimumRecordSizeInBytes {
+  df.SetTruncated()
+  return errors.New("NTP packet too short")
+ }
+
+ // RFC 5905 does not appear to define a maximum NTP record length.
+ // Extension fields may follow the fixed header, and of those the RFC
+ // says: "While the minimum field length containing required fields is
+ // four words (16 octets), a maximum field length remains to be
+ // established." So no upper bound on length is enforced here.
+
+ // Record the raw bytes of this layer in the embedded BaseLayer
+ // (Contents = bytes at this level, Payload = bytes of the next level).
+ d.BaseLayer = BaseLayer{Contents: data}
+
+ // Extract the fields from the block of bytes; refer to the packet
+ // diagram above for the layout.
+
+ // Leap indicator (2 bits), version (3 bits) and mode (3 bits) are all
+ // packed into the first byte.
+ firstByte := data[0]
+ d.LeapIndicator = NTPLeapIndicator(firstByte >> 6 & 0x03)
+ d.Version = NTPVersion(firstByte >> 3 & 0x07)
+ d.Mode = NTPMode(firstByte & 0x07)
+ d.Stratum = NTPStratum(data[1])
+ d.Poll = NTPLog2Seconds(data[2])
+ d.Precision = NTPLog2Seconds(data[3])
+
+ // The remaining fixed fields are plain big-endian integers.
+ d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8]))
+ d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12]))
+ d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16]))
+ d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24]))
+ d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32]))
+ d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40]))
+ d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48]))
+
+ // Extension fields are not analysed here; expose any trailing bytes
+ // verbatim so that callers can inspect them.
+ d.ExtensionBytes = data[48:]
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes)
+ if err != nil {
+  return err
+ }
+
+ // Leap indicator (2 bits), version (3 bits) and mode (3 bits) share the
+ // first byte of the record.
+ data[0] = byte(uint8(d.LeapIndicator)<<6&0xC0 | uint8(d.Version)<<3&0x38 | uint8(d.Mode)&0x07)
+ data[1] = byte(d.Stratum)
+ data[2] = byte(d.Poll)
+ data[3] = byte(d.Precision)
+
+ // The remaining fixed fields are written in big-endian order.
+ binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay))
+ binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion))
+ binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID))
+ binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp))
+ binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp))
+ binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp))
+ binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp))
+
+ // Any extension bytes follow the fixed header verbatim.
+ ext, err := b.AppendBytes(len(d.ExtensionBytes))
+ if err != nil {
+  return err
+ }
+ copy(ext, d.ExtensionBytes)
+
+ return nil
+}
+
+//******************************************************************************
+
+// CanDecode returns a set of layers that NTP objects can decode.
+// As NTP objects can only decode the NTP layer, we can return just that layer.
+// Apparently a single layer type implements LayerClass.
+func (d *NTP) CanDecode() gopacket.LayerClass {
+ return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (NTP) layer. As NTP packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *NTP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+//******************************************************************************
+
+// Payload returns the payload of the NTP layer. NTP packets do not carry
+// any data payload, so the empty byte slice is returned.
+// In Go, a nil slice is functionally identical to an empty slice, so we
+// return nil to avoid a heap allocation.
+func (d *NTP) Payload() []byte {
+ return nil
+}
+
+//******************************************************************************
+//* End Of NTP File *
+//******************************************************************************
diff --git a/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/google/gopacket/layers/ospf.go
new file mode 100644
index 0000000..f3f2ca9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ospf.go
@@ -0,0 +1,680 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// OSPFType denotes what kind of OSPF type it is
+type OSPFType uint8
+
+// Potential values for OSPF.Type.
+const (
+ OSPFHello OSPFType = 1
+ OSPFDatabaseDescription OSPFType = 2
+ OSPFLinkStateRequest OSPFType = 3
+ OSPFLinkStateUpdate OSPFType = 4
+ OSPFLinkStateAcknowledgment OSPFType = 5
+)
+
+// LSA Function Codes for LSAheader.LSType
+const (
+ RouterLSAtypeV2 = 0x1
+ RouterLSAtype = 0x2001
+ NetworkLSAtypeV2 = 0x2
+ NetworkLSAtype = 0x2002
+ SummaryLSANetworktypeV2 = 0x3
+ InterAreaPrefixLSAtype = 0x2003
+ SummaryLSAASBRtypeV2 = 0x4
+ InterAreaRouterLSAtype = 0x2004
+ ASExternalLSAtypeV2 = 0x5
+ ASExternalLSAtype = 0x4005
+ NSSALSAtype = 0x2007
+ LinkLSAtype = 0x0008
+ IntraAreaPrefixLSAtype = 0x2009
+)
+
+// String returns the human-readable name for the OSPF packet type, or the
+// empty string for values outside the defined range.
+func (i OSPFType) String() string {
+ // Index a sparse table by type value; unset slots (including 0) hold "".
+ names := [...]string{
+  OSPFHello:                   "Hello",
+  OSPFDatabaseDescription:     "Database Description",
+  OSPFLinkStateRequest:        "Link State Request",
+  OSPFLinkStateUpdate:         "Link State Update",
+  OSPFLinkStateAcknowledgment: "Link State Acknowledgment",
+ }
+ if int(i) < len(names) {
+  return names[i]
+ }
+ return ""
+}
+
+// Prefix extends IntraAreaPrefixLSA
+type Prefix struct {
+ PrefixLength uint8
+ PrefixOptions uint8
+ Metric uint16
+ AddressPrefix []byte
+}
+
+// IntraAreaPrefixLSA is the struct from RFC 5340 A.4.10.
+type IntraAreaPrefixLSA struct {
+ NumOfPrefixes uint16
+ RefLSType uint16
+ RefLinkStateID uint32
+ RefAdvRouter uint32
+ Prefixes []Prefix
+}
+
+// LinkLSA is the struct from RFC 5340 A.4.9.
+type LinkLSA struct {
+ RtrPriority uint8
+ Options uint32
+ LinkLocalAddress []byte
+ NumOfPrefixes uint32
+ Prefixes []Prefix
+}
+
+// ASExternalLSAV2 is the struct from RFC 2328 A.4.5.
+type ASExternalLSAV2 struct {
+ NetworkMask uint32
+ ExternalBit uint8
+ Metric uint32
+ ForwardingAddress uint32
+ ExternalRouteTag uint32
+}
+
+// ASExternalLSA is the struct from RFC 5340 A.4.7.
+type ASExternalLSA struct {
+ Flags uint8
+ Metric uint32
+ PrefixLength uint8
+ PrefixOptions uint8
+ RefLSType uint16
+ AddressPrefix []byte
+ ForwardingAddress []byte
+ ExternalRouteTag uint32
+ RefLinkStateID uint32
+}
+
+// InterAreaRouterLSA is the struct from RFC 5340 A.4.6.
+type InterAreaRouterLSA struct {
+ Options uint32
+ Metric uint32
+ DestinationRouterID uint32
+}
+
+// InterAreaPrefixLSA is the struct from RFC 5340 A.4.5.
+type InterAreaPrefixLSA struct {
+ Metric uint32
+ PrefixLength uint8
+ PrefixOptions uint8
+ AddressPrefix []byte
+}
+
+// NetworkLSA is the struct from RFC 5340 A.4.4.
+type NetworkLSA struct {
+ Options uint32
+ AttachedRouter []uint32
+}
+
+// RouterV2 extends RouterLSAV2
+type RouterV2 struct {
+ Type uint8
+ LinkID uint32
+ LinkData uint32
+ Metric uint16
+}
+
+// RouterLSAV2 is the struct from RFC 2328 A.4.2.
+type RouterLSAV2 struct {
+ Flags uint8
+ Links uint16
+ Routers []RouterV2
+}
+
+// Router extends RouterLSA
+type Router struct {
+ Type uint8
+ Metric uint16
+ InterfaceID uint32
+ NeighborInterfaceID uint32
+ NeighborRouterID uint32
+}
+
+// RouterLSA is the struct from RFC 5340 A.4.3.
+type RouterLSA struct {
+ Flags uint8
+ Options uint32
+ Routers []Router
+}
+
+// LSAheader is the struct from RFC 5340 A.4.2 and RFC 2328 A.4.1.
+type LSAheader struct {
+ LSAge uint16
+ LSType uint16
+ LinkStateID uint32
+ AdvRouter uint32
+ LSSeqNumber uint32
+ LSChecksum uint16
+ Length uint16
+ LSOptions uint8
+}
+
+// LSA links LSAheader with the structs from RFC 5340 A.4.
+type LSA struct {
+ LSAheader
+ Content interface{}
+}
+
+// LSUpdate is the struct from RFC 5340 A.3.5.
+type LSUpdate struct {
+ NumOfLSAs uint32
+ LSAs []LSA
+}
+
+// LSReq is the struct from RFC 5340 A.3.4.
+type LSReq struct {
+ LSType uint16
+ LSID uint32
+ AdvRouter uint32
+}
+
+// DbDescPkg is the struct from RFC 5340 A.3.3.
+type DbDescPkg struct {
+ Options uint32
+ InterfaceMTU uint16
+ Flags uint16
+ DDSeqNumber uint32
+ LSAinfo []LSAheader
+}
+
+// HelloPkg is the struct from RFC 5340 A.3.2.
+type HelloPkg struct {
+ InterfaceID uint32
+ RtrPriority uint8
+ Options uint32
+ HelloInterval uint16
+ RouterDeadInterval uint32
+ DesignatedRouterID uint32
+ BackupDesignatedRouterID uint32
+ NeighborID []uint32
+}
+
+// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information
+type HelloPkgV2 struct {
+ HelloPkg
+ NetworkMask uint32
+}
+
+// OSPF is a basic OSPF packet header with common fields of Version 2 and Version 3.
+type OSPF struct {
+ Version uint8
+ Type OSPFType
+ PacketLength uint16
+ RouterID uint32
+ AreaID uint32
+ Checksum uint16
+ Content interface{}
+}
+
+// OSPFv2 extends the OSPF header with version 2 specific fields
+// (authentication type and authentication data).
+type OSPFv2 struct {
+ BaseLayer
+ OSPF
+ AuType uint16
+ Authentication uint64
+}
+
+// OSPFv3 extend the OSPF head with version 3 specific fields
+type OSPFv3 struct {
+ BaseLayer
+ OSPF
+ Instance uint8
+ Reserved uint8
+}
+
+// getLSAsv2 parses num Link State Advertisements from the body of an
+// OSPFv2 Link State Update. data must start at the first LSA header.
+//
+// Each LSA carries a 20-byte header: age(2), options(1), type(1), link
+// state ID(4), advertising router(4), sequence number(4), checksum(2) and
+// length(2). The type-specific body is decoded by extractLSAInformation.
+//
+// Returns the parsed LSAs, or a non-nil error when a header or body is
+// truncated or of an unknown type.
+func getLSAsv2(num uint32, data []byte) ([]LSA, error) {
+ var lsas []LSA
+ var offset uint32
+ for i := uint32(0); i < num; i++ {
+  // num and the per-LSA lengths come off the wire, so never trust them:
+  // stop before reading a 20-byte header that would run past the buffer.
+  if uint32(len(data)) < offset+20 {
+   return nil, fmt.Errorf("Link State Advertisement header truncated.")
+  }
+  // In OSPFv2 the LS type is a single byte at offset 3 of the header.
+  lstype := uint16(data[offset+3])
+  lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+  // extractLSAInformation validates lsalength (>= 20 and within data),
+  // so a zero or short length cannot cause this loop to spin or overrun.
+  content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+  if err != nil {
+   return nil, fmt.Errorf("Could not extract Link State type.")
+  }
+  lsa := LSA{
+   LSAheader: LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+    LSOptions:   data[offset+2],
+    LSType:      lstype,
+    LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+    Length:      lsalength,
+   },
+   Content: content,
+  }
+  lsas = append(lsas, lsa)
+  offset += uint32(lsalength)
+ }
+ return lsas, nil
+}
+
+// extractLSAInformation decodes the type-specific body of a single LSA.
+//
+// lstype selects which RFC 2328 / RFC 5340 structure to build, lsalength
+// is the total LSA length (header included) taken from the LSA header,
+// and data starts at the beginning of the LSA header (so type-specific
+// fields begin at offset 20).
+//
+// Returns one of the *LSA structs declared above as an interface{}, or a
+// non-nil error when lsalength is shorter than the 20-byte header, data
+// is shorter than lsalength, or lstype is unknown.
+//
+// NOTE(review): several cases index fixed offsets past byte 20 (e.g.
+// data[22:24], data[20:44]) that are only in range when lsalength is large
+// enough for that LSA type; a hostile lsalength of exactly 20 would pass
+// the guards below and still panic — confirm callers/inputs.
+func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) {
+ // The 20-byte LSA header is the minimum credible length.
+ if lsalength < 20 {
+  return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20)
+ }
+ // The buffer must contain the whole advertised LSA.
+ if len(data) < int(lsalength) {
+  return nil, fmt.Errorf("Link State header length %v too short, %v required", len(data), lsalength)
+ }
+ var content interface{}
+ switch lstype {
+ case RouterLSAtypeV2:
+  // OSPFv2 Router-LSA (RFC 2328 A.4.2).
+  // NOTE(review): only Flags and the link count are decoded; the
+  // per-link RouterV2 entries are never parsed, so Routers stays nil.
+  var routers []RouterV2
+  links := binary.BigEndian.Uint16(data[22:24])
+  content = RouterLSAV2{
+   Flags: data[20],
+   Links: links,
+   Routers: routers,
+  }
+ case ASExternalLSAtypeV2:
+  // OSPFv2 AS-External-LSA (RFC 2328 A.4.5). The E-bit is the top bit
+  // of byte 24; the metric is the low 24 bits of the same word.
+  content = ASExternalLSAV2{
+   NetworkMask: binary.BigEndian.Uint32(data[20:24]),
+   ExternalBit: data[24] & 0x80,
+   Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+   ForwardingAddress: binary.BigEndian.Uint32(data[28:32]),
+   ExternalRouteTag: binary.BigEndian.Uint32(data[32:36]),
+  }
+ case RouterLSAtype:
+  // OSPFv3 Router-LSA (RFC 5340 A.4.3): a 4-byte flags/options word
+  // followed by 16-byte interface descriptions up to lsalength.
+  var routers []Router
+  var j uint32
+  for j = 24; j < uint32(lsalength); j += 16 {
+   router := Router{
+    Type: uint8(data[j]),
+    Metric: binary.BigEndian.Uint16(data[j+2 : j+4]),
+    InterfaceID: binary.BigEndian.Uint32(data[j+4 : j+8]),
+    NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]),
+    NeighborRouterID: binary.BigEndian.Uint32(data[j+12 : j+16]),
+   }
+   routers = append(routers, router)
+  }
+  content = RouterLSA{
+   Flags: uint8(data[20]),
+   Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   Routers: routers,
+  }
+ case NetworkLSAtype:
+  // OSPFv3 Network-LSA (RFC 5340 A.4.4): a list of attached router IDs.
+  var routers []uint32
+  var j uint32
+  for j = 24; j < uint32(lsalength); j += 4 {
+   routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+  }
+  content = NetworkLSA{
+   Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   AttachedRouter: routers,
+  }
+ case InterAreaPrefixLSAtype:
+  // OSPFv3 Inter-Area-Prefix-LSA (RFC 5340 A.4.5).
+  content = InterAreaPrefixLSA{
+   Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   PrefixLength: uint8(data[24]),
+   PrefixOptions: uint8(data[25]),
+   AddressPrefix: data[28:uint32(lsalength)],
+  }
+ case InterAreaRouterLSAtype:
+  // OSPFv3 Inter-Area-Router-LSA (RFC 5340 A.4.6).
+  content = InterAreaRouterLSA{
+   Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+   DestinationRouterID: binary.BigEndian.Uint32(data[28:32]),
+  }
+ case ASExternalLSAtype:
+  fallthrough
+ case NSSALSAtype:
+  // OSPFv3 AS-External-LSA / NSSA-LSA (RFC 5340 A.4.7): NSSA LSAs share
+  // the AS-External layout. The forwarding address is present only when
+  // the F-bit (0x02) is set, and then follows the address prefix.
+  flags := uint8(data[20])
+  // PrefixLength on the wire is in bits; stored here in bytes.
+  prefixLen := uint8(data[24]) / 8
+  var forwardingAddress []byte
+  if (flags & 0x02) == 0x02 {
+   forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16]
+  }
+  content = ASExternalLSA{
+   Flags: flags,
+   Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   PrefixLength: prefixLen,
+   PrefixOptions: uint8(data[25]),
+   RefLSType: binary.BigEndian.Uint16(data[26:28]),
+   AddressPrefix: data[28 : 28+uint32(prefixLen)],
+   ForwardingAddress: forwardingAddress,
+  }
+ case LinkLSAtype:
+  // OSPFv3 Link-LSA (RFC 5340 A.4.9): link-local address plus a counted
+  // list of variable-length prefixes starting at offset 44.
+  var prefixes []Prefix
+  var prefixOffset uint32 = 44
+  var j uint32
+  numOfPrefixes := binary.BigEndian.Uint32(data[40:44])
+  for j = 0; j < numOfPrefixes; j++ {
+   prefixLen := uint8(data[prefixOffset])
+   prefix := Prefix{
+    PrefixLength: prefixLen,
+    PrefixOptions: uint8(data[prefixOffset+1]),
+    AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+   }
+   prefixes = append(prefixes, prefix)
+   prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8
+  }
+  content = LinkLSA{
+   RtrPriority: uint8(data[20]),
+   Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+   LinkLocalAddress: data[24:40],
+   NumOfPrefixes: numOfPrefixes,
+   Prefixes: prefixes,
+  }
+ case IntraAreaPrefixLSAtype:
+  // OSPFv3 Intra-Area-Prefix-LSA (RFC 5340 A.4.10).
+  // NOTE(review): the Link-LSA case advances prefixOffset by
+  // prefixLen/8 (bytes) but this case advances by prefixLen (bits) —
+  // one of the two is presumably wrong; verify against RFC 5340.
+  var prefixes []Prefix
+  var prefixOffset uint32 = 32
+  var j uint16
+  numOfPrefixes := binary.BigEndian.Uint16(data[20:22])
+  for j = 0; j < numOfPrefixes; j++ {
+   prefixLen := uint8(data[prefixOffset])
+   prefix := Prefix{
+    PrefixLength: prefixLen,
+    PrefixOptions: uint8(data[prefixOffset+1]),
+    Metric: binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]),
+    AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+   }
+   prefixes = append(prefixes, prefix)
+   prefixOffset = prefixOffset + 4 + uint32(prefixLen)
+  }
+  content = IntraAreaPrefixLSA{
+   NumOfPrefixes: numOfPrefixes,
+   RefLSType: binary.BigEndian.Uint16(data[22:24]),
+   RefLinkStateID: binary.BigEndian.Uint32(data[24:28]),
+   RefAdvRouter: binary.BigEndian.Uint32(data[28:32]),
+   Prefixes: prefixes,
+  }
+ default:
+  return nil, fmt.Errorf("Unknown Link State type.")
+ }
+ return content, nil
+}
+
+// getLSAs parses num Link State Advertisements from the body of an OSPFv3
+// Link State Update. data must start at the first LSA header.
+//
+// The OSPFv3 LSA header differs from v2 only in that the LS type is a
+// 16-bit field at offset 2 and there is no options byte. Type-specific
+// bodies are decoded by extractLSAInformation.
+//
+// Returns the parsed LSAs, or a non-nil error when a header or body is
+// truncated or of an unknown type.
+func getLSAs(num uint32, data []byte) ([]LSA, error) {
+ var lsas []LSA
+ var offset uint32
+ for i := uint32(0); i < num; i++ {
+  // num and the per-LSA lengths come off the wire, so never trust them:
+  // stop before reading a 20-byte header that would run past the buffer.
+  if uint32(len(data)) < offset+20 {
+   return nil, fmt.Errorf("Link State Advertisement header truncated.")
+  }
+  lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4])
+  lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+
+  // extractLSAInformation validates lsalength (>= 20 and within data),
+  // so a zero or short length cannot cause this loop to spin or overrun.
+  content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+  if err != nil {
+   return nil, fmt.Errorf("Could not extract Link State type.")
+  }
+  lsa := LSA{
+   LSAheader: LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+    LSType:      lstype,
+    LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+    Length:      lsalength,
+   },
+   Content: content,
+  }
+  lsas = append(lsas, lsa)
+  offset += uint32(lsalength)
+ }
+ return lsas, nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPFv2 layer.
+//
+// The 24-byte OSPFv2 header is parsed first; the remainder is decoded
+// according to the packet type. Returns a non-nil error when the data is
+// too short for the header, for the advertised packet length, or for the
+// fixed fields of the packet type, or when a Link State Update body
+// cannot be parsed.
+func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 24 {
+  return fmt.Errorf("Packet too small for OSPF Version 2")
+ }
+
+ ospf.Version = uint8(data[0])
+ ospf.Type = OSPFType(data[1])
+ ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+ ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+ ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+ ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+ ospf.AuType = binary.BigEndian.Uint16(data[14:16])
+ ospf.Authentication = binary.BigEndian.Uint64(data[16:24])
+
+ // The per-type loops below use the advertised PacketLength as an upper
+ // bound, so it must not exceed the data we actually have.
+ if int(ospf.PacketLength) > len(data) {
+  df.SetTruncated()
+  return fmt.Errorf("Packet too small for the advertised OSPFv2 packet length")
+ }
+
+ switch ospf.Type {
+ case OSPFHello:
+  // Fixed Hello fields occupy bytes 24..44; neighbors follow.
+  if len(data) < 44 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv2 Hello")
+  }
+  var neighbors []uint32
+  for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 {
+   neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+  }
+  ospf.Content = HelloPkgV2{
+   NetworkMask: binary.BigEndian.Uint32(data[24:28]),
+   HelloPkg: HelloPkg{
+    HelloInterval:            binary.BigEndian.Uint16(data[28:30]),
+    Options:                  uint32(data[30]),
+    RtrPriority:              uint8(data[31]),
+    RouterDeadInterval:       binary.BigEndian.Uint32(data[32:36]),
+    DesignatedRouterID:       binary.BigEndian.Uint32(data[36:40]),
+    BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]),
+    NeighborID:               neighbors,
+   },
+  }
+ case OSPFDatabaseDescription:
+  // Fixed DB Description fields occupy bytes 24..32; LSA headers follow.
+  if len(data) < 32 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv2 Database Description")
+  }
+  var lsas []LSAheader
+  for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 {
+   lsa := LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+    LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+    LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+    Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+   }
+   lsas = append(lsas, lsa)
+  }
+  ospf.Content = DbDescPkg{
+   InterfaceMTU: binary.BigEndian.Uint16(data[24:26]),
+   Options:      uint32(data[26]),
+   Flags:        uint16(data[27]),
+   DDSeqNumber:  binary.BigEndian.Uint32(data[28:32]),
+   LSAinfo:      lsas,
+  }
+ case OSPFLinkStateRequest:
+  // Request entries are 12 bytes each, starting right after the header.
+  var lsrs []LSReq
+  for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 {
+   lsr := LSReq{
+    LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+    LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+   }
+   lsrs = append(lsrs, lsr)
+  }
+  ospf.Content = lsrs
+ case OSPFLinkStateUpdate:
+  // The LSA count occupies bytes 24..28; LSAs follow.
+  if len(data) < 28 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv2 Link State Update")
+  }
+  num := binary.BigEndian.Uint32(data[24:28])
+
+  lsas, err := getLSAsv2(num, data[28:])
+  if err != nil {
+   return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+  }
+  ospf.Content = LSUpdate{
+   NumOfLSAs: num,
+   LSAs:      lsas,
+  }
+ case OSPFLinkStateAcknowledgment:
+  // Acknowledged LSA headers are 20 bytes each, right after the header.
+  var lsas []LSAheader
+  for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 {
+   lsa := LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+    LSOptions:   data[i+2],
+    LSType:      uint16(data[i+3]),
+    LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+    Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+   }
+   lsas = append(lsas, lsa)
+  }
+  ospf.Content = lsas
+ }
+ return nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPFv3 layer.
+//
+// The 16-byte OSPFv3 header is parsed first; the remainder is decoded
+// according to the packet type. Returns a non-nil error when the data is
+// too short for the header, for the advertised packet length, or for the
+// fixed fields of the packet type, or when a Link State Update body
+// cannot be parsed.
+func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ if len(data) < 16 {
+  return fmt.Errorf("Packet too small for OSPF Version 3")
+ }
+
+ ospf.Version = uint8(data[0])
+ ospf.Type = OSPFType(data[1])
+ ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+ ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+ ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+ ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+ ospf.Instance = uint8(data[14])
+ ospf.Reserved = uint8(data[15])
+
+ // The per-type loops below use the advertised PacketLength as an upper
+ // bound, so it must not exceed the data we actually have.
+ if int(ospf.PacketLength) > len(data) {
+  df.SetTruncated()
+  return fmt.Errorf("Packet too small for the advertised OSPFv3 packet length")
+ }
+
+ switch ospf.Type {
+ case OSPFHello:
+  // Fixed Hello fields occupy bytes 16..36; neighbors follow.
+  if len(data) < 36 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv3 Hello")
+  }
+  var neighbors []uint32
+  for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 {
+   neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+  }
+  ospf.Content = HelloPkg{
+   InterfaceID:              binary.BigEndian.Uint32(data[16:20]),
+   RtrPriority:              uint8(data[20]),
+   Options:                  binary.BigEndian.Uint32(data[21:25]) >> 8,
+   HelloInterval:            binary.BigEndian.Uint16(data[24:26]),
+   RouterDeadInterval:       uint32(binary.BigEndian.Uint16(data[26:28])),
+   DesignatedRouterID:       binary.BigEndian.Uint32(data[28:32]),
+   BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]),
+   NeighborID:               neighbors,
+  }
+ case OSPFDatabaseDescription:
+  // Fixed DB Description fields occupy bytes 16..28; LSA headers follow.
+  if len(data) < 28 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv3 Database Description")
+  }
+  var lsas []LSAheader
+  for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 {
+   lsa := LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+    LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+    LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+    Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+   }
+   lsas = append(lsas, lsa)
+  }
+  ospf.Content = DbDescPkg{
+   Options:      binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF,
+   InterfaceMTU: binary.BigEndian.Uint16(data[20:22]),
+   Flags:        binary.BigEndian.Uint16(data[22:24]),
+   DDSeqNumber:  binary.BigEndian.Uint32(data[24:28]),
+   LSAinfo:      lsas,
+  }
+ case OSPFLinkStateRequest:
+  // Request entries are 12 bytes each, starting right after the header.
+  var lsrs []LSReq
+  for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 {
+   lsr := LSReq{
+    LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+    LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+   }
+   lsrs = append(lsrs, lsr)
+  }
+  ospf.Content = lsrs
+ case OSPFLinkStateUpdate:
+  // The LSA count occupies bytes 16..20; LSAs follow.
+  if len(data) < 20 {
+   df.SetTruncated()
+   return fmt.Errorf("Packet too small for OSPFv3 Link State Update")
+  }
+  num := binary.BigEndian.Uint32(data[16:20])
+  lsas, err := getLSAs(num, data[20:])
+  if err != nil {
+   return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+  }
+  ospf.Content = LSUpdate{
+   NumOfLSAs: num,
+   LSAs:      lsas,
+  }
+
+ case OSPFLinkStateAcknowledgment:
+  // Acknowledged LSA headers are 20 bytes each, right after the header.
+  var lsas []LSAheader
+  for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 {
+   lsa := LSAheader{
+    LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+    LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+    LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+    AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+    LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+    LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+    Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+   }
+   lsas = append(lsas, lsa)
+  }
+  ospf.Content = lsas
+ default:
+ }
+
+ return nil
+}
+
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv2) LayerType() gopacket.LayerType {
+ return LayerTypeOSPF
+}
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv3) LayerType() gopacket.LayerType {
+ return LayerTypeOSPF
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv3) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv2) CanDecode() gopacket.LayerClass {
+ return LayerTypeOSPF
+}
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv3) CanDecode() gopacket.LayerClass {
+ return LayerTypeOSPF
+}
+
+// decodeOSPF decodes data as an OSPF packet, dispatching on the version
+// field (the first byte) to the version 2 or version 3 decoder.
+//
+// Returns a non-nil error when the data is too short or when the version
+// is neither 2 nor 3.
+func decodeOSPF(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 14 {
+  // Fixed: error message previously read "too smal".
+  return fmt.Errorf("Packet too small for OSPF")
+ }
+
+ switch uint8(data[0]) {
+ case 2:
+  ospf := &OSPFv2{}
+  return decodingLayerDecoder(ospf, data, p)
+ case 3:
+  ospf := &OSPFv3{}
+  return decodingLayerDecoder(ospf, data, p)
+ }
+
+ return fmt.Errorf("Unable to determine OSPF type.")
+}
diff --git a/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/google/gopacket/layers/pflog.go
new file mode 100644
index 0000000..853882f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pflog.go
@@ -0,0 +1,76 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// PFDirection denotes the direction of the logged packet relative to the
+// pf packet filter: unknown/both, inbound, or outbound.
+type PFDirection uint8
+
+// Possible values of PFDirection.
+const (
+ PFDirectionInOut PFDirection = 0
+ PFDirectionIn PFDirection = 1
+ PFDirectionOut PFDirection = 2
+)
+
+// PFLog provides the layer for 'pf' packet-filter logging, as described at
+// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4
+type PFLog struct {
+ BaseLayer
+ Length uint8
+ Family ProtocolFamily
+ Action, Reason uint8
+ IFName, Ruleset []byte
+ RuleNum, SubruleNum uint32
+ UID uint32
+ PID int32
+ RuleUID uint32
+ RulePID int32
+ Direction PFDirection
+ // The remainder is padding
+}
+
+// DecodeFromBytes decodes a pflog header from data into pf.
+//
+// The fixed fields read below span bytes 0..60 (Direction is the byte at
+// offset 60); the header is padded out to pf.Length+3 bytes, after which
+// the logged packet itself begins as this layer's payload.
+func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // Refuse anything too short to hold the fixed fields (offsets 0..60),
+ // rather than panicking on an out-of-range index.
+ if len(data) < 61 {
+  df.SetTruncated()
+  return errors.New("PFLog packet too short")
+ }
+ pf.Length = data[0]
+ pf.Family = ProtocolFamily(data[1])
+ pf.Action = data[2]
+ pf.Reason = data[3]
+ pf.IFName = data[4:20]
+ pf.Ruleset = data[20:36]
+ pf.RuleNum = binary.BigEndian.Uint32(data[36:40])
+ pf.SubruleNum = binary.BigEndian.Uint32(data[40:44])
+ pf.UID = binary.BigEndian.Uint32(data[44:48])
+ pf.PID = int32(binary.BigEndian.Uint32(data[48:52]))
+ pf.RuleUID = binary.BigEndian.Uint32(data[52:56])
+ pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60]))
+ pf.Direction = PFDirection(data[60])
+ if pf.Length%4 != 1 {
+  return errors.New("PFLog header length should be 3 less than multiple of 4")
+ }
+ // The header is padded to a multiple of 4 bytes; make sure the declared
+ // padded length is actually present before slicing.
+ actualLength := int(pf.Length) + 3
+ if len(data) < actualLength {
+  df.SetTruncated()
+  return errors.New("PFLog data shorter than declared header length")
+ }
+ pf.Contents = data[:actualLength]
+ pf.Payload = data[actualLength:]
+ return nil
+}
+
+// LayerType returns layers.LayerTypePFLog
+func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog }
+
+// CanDecode returns the set of layer types this DecodingLayer can decode: LayerTypePFLog.
+func (pf *PFLog) CanDecode() gopacket.LayerClass { return LayerTypePFLog }
+
+// NextLayerType returns the layer type of the logged packet, derived from
+// the pflog address family field.
+func (pf *PFLog) NextLayerType() gopacket.LayerType {
+ return pf.Family.LayerType()
+}
+
+// decodePFLog decodes data as a PFLog layer; this is the decoder
+// registered against LayerTypePFLog.
+func decodePFLog(data []byte, p gopacket.PacketBuilder) error {
+ pf := &PFLog{}
+ return decodingLayerDecoder(pf, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/google/gopacket/layers/ports.go
new file mode 100644
index 0000000..705fd1d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ports.go
@@ -0,0 +1,154 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/google/gopacket"
+)
+
+// TCPPort is a port in a TCP layer.
+type TCPPort uint16
+
+// UDPPort is a port in a UDP layer.
+type UDPPort uint16
+
+// RUDPPort is a port in a RUDP layer.
+type RUDPPort uint8
+
+// SCTPPort is a port in a SCTP layer.
+type SCTPPort uint16
+
+// UDPLitePort is a port in a UDPLite layer.
+type UDPLitePort uint16
+
+// RUDPPortNames contains the string names for all RUDP ports.
+var RUDPPortNames = map[RUDPPort]string{}
+
+// UDPLitePortNames contains the string names for all UDPLite ports.
+var UDPLitePortNames = map[UDPLitePort]string{}
+
+// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// TCPPortNames.
+func (a TCPPort) String() string {
+ if name, ok := TCPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a TCPPort) LayerType() gopacket.LayerType {
+ lt := tcpPortLayerType[uint16(a)]
+ if lt != 0 {
+ return lt
+ }
+ return gopacket.LayerTypePayload
+}
+
+var tcpPortLayerType = [65536]gopacket.LayerType{
+ 53: LayerTypeDNS,
+ 443: LayerTypeTLS, // https
+ 502: LayerTypeModbusTCP, // modbustcp
+ 636: LayerTypeTLS, // ldaps
+ 989: LayerTypeTLS, // ftps-data
+ 990: LayerTypeTLS, // ftps
+ 992: LayerTypeTLS, // telnets
+ 993: LayerTypeTLS, // imaps
+ 994: LayerTypeTLS, // ircs
+ 995: LayerTypeTLS, // pop3s
+ 5061: LayerTypeTLS, // ips
+}
+
+// RegisterTCPPortLayerType creates a new mapping between a TCPPort
+// and an underlaying LayerType.
+func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) {
+ tcpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPPortNames.
+func (a UDPPort) String() string {
+ if name, ok := UDPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a UDPPort) LayerType() gopacket.LayerType {
+ lt := udpPortLayerType[uint16(a)]
+ if lt != 0 {
+ return lt
+ }
+ return gopacket.LayerTypePayload
+}
+
+var udpPortLayerType = [65536]gopacket.LayerType{
+ 53: LayerTypeDNS,
+ 123: LayerTypeNTP,
+ 4789: LayerTypeVXLAN,
+ 67: LayerTypeDHCPv4,
+ 68: LayerTypeDHCPv4,
+ 546: LayerTypeDHCPv6,
+ 547: LayerTypeDHCPv6,
+ 5060: LayerTypeSIP,
+ 6343: LayerTypeSFlow,
+ 6081: LayerTypeGeneve,
+ 3784: LayerTypeBFD,
+ 2152: LayerTypeGTPv1U,
+}
+
+// RegisterUDPPortLayerType creates a new mapping between a UDPPort
+// and an underlaying LayerType.
+func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) {
+ udpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// RUDPPortNames.
+func (a RUDPPort) String() string {
+ if name, ok := RUDPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// SCTPPortNames.
+func (a SCTPPort) String() string {
+ if name, ok := SCTPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPLitePortNames.
+func (a UDPLitePort) String() string {
+ if name, ok := UDPLitePortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
diff --git a/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/google/gopacket/layers/ppp.go
new file mode 100644
index 0000000..e534d69
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ppp.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+// PPP is the layer for PPP encapsulation headers.
+type PPP struct {
+ BaseLayer
+ PPPType PPPType
+ HasPPTPHeader bool
+}
+
+// PPPEndpoint is a singleton endpoint for PPP. Since there is no actual
+// addressing for the two ends of a PPP connection, we use a singleton value
+// named 'point' for each endpoint.
+var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)
+
+// PPPFlow is a singleton flow for PPP. Since there is no actual addressing for
+// the two ends of a PPP connection, we use a singleton value to represent the
+// flow for all PPP connections.
+var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)
+
+// LayerType returns LayerTypePPP
+func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }
+
+// LinkFlow returns PPPFlow.
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow }
+
+func decodePPP(data []byte, p gopacket.PacketBuilder) error {
+ ppp := &PPP{}
+ offset := 0
+ if data[0] == 0xff && data[1] == 0x03 {
+ offset = 2
+ ppp.HasPPTPHeader = true
+ }
+ if data[offset]&0x1 == 0 {
+ if data[offset+1]&0x1 == 0 {
+ return errors.New("PPP has invalid type")
+ }
+ ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2]))
+ ppp.Contents = data[offset : offset+2]
+ ppp.Payload = data[offset+2:]
+ } else {
+ ppp.PPPType = PPPType(data[offset])
+ ppp.Contents = data[offset : offset+1]
+ ppp.Payload = data[offset+1:]
+ }
+ p.AddLayer(ppp)
+ p.SetLinkLayer(ppp)
+ return p.NextDecoder(ppp.PPPType)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if p.PPPType&0x100 == 0 {
+ bytes, err := b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(p.PPPType))
+ } else {
+ bytes, err := b.PrependBytes(1)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(p.PPPType)
+ }
+ if p.HasPPTPHeader {
+ bytes, err := b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ bytes[0] = 0xff
+ bytes[1] = 0x03
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/google/gopacket/layers/pppoe.go
new file mode 100644
index 0000000..14cd63a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pppoe.go
@@ -0,0 +1,60 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// PPPoE is the layer for PPPoE encapsulation headers.
+type PPPoE struct {
+ BaseLayer
+ Version uint8
+ Type uint8
+ Code PPPoECode
+ SessionId uint16
+ Length uint16
+}
+
+// LayerType returns gopacket.LayerTypePPPoE.
+func (p *PPPoE) LayerType() gopacket.LayerType {
+ return LayerTypePPPoE
+}
+
+// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516).
+func decodePPPoE(data []byte, p gopacket.PacketBuilder) error {
+ pppoe := &PPPoE{
+ Version: data[0] >> 4,
+ Type: data[0] & 0x0F,
+ Code: PPPoECode(data[1]),
+ SessionId: binary.BigEndian.Uint16(data[2:4]),
+ Length: binary.BigEndian.Uint16(data[4:6]),
+ }
+ pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]}
+ p.AddLayer(pppoe)
+ return p.NextDecoder(pppoe.Code)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ payload := b.Bytes()
+ bytes, err := b.PrependBytes(6)
+ if err != nil {
+ return err
+ }
+ bytes[0] = (p.Version << 4) | p.Type
+ bytes[1] = byte(p.Code)
+ binary.BigEndian.PutUint16(bytes[2:], p.SessionId)
+ if opts.FixLengths {
+ p.Length = uint16(len(payload))
+ }
+ binary.BigEndian.PutUint16(bytes[4:], p.Length)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/google/gopacket/layers/prism.go
new file mode 100644
index 0000000..e1711e7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/prism.go
@@ -0,0 +1,146 @@
+// Copyright 2015 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+func decodePrismValue(data []byte, pv *PrismValue) {
+ pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4]))
+ pv.Status = binary.LittleEndian.Uint16(data[4:6])
+ pv.Length = binary.LittleEndian.Uint16(data[6:8])
+ pv.Data = data[8 : 8+pv.Length]
+}
+
+type PrismDID uint32
+
+const (
+ PrismDIDType1HostTime PrismDID = 0x10044
+ PrismDIDType2HostTime PrismDID = 0x01041
+ PrismDIDType1MACTime PrismDID = 0x20044
+ PrismDIDType2MACTime PrismDID = 0x02041
+ PrismDIDType1Channel PrismDID = 0x30044
+ PrismDIDType2Channel PrismDID = 0x03041
+ PrismDIDType1RSSI PrismDID = 0x40044
+ PrismDIDType2RSSI PrismDID = 0x04041
+ PrismDIDType1SignalQuality PrismDID = 0x50044
+ PrismDIDType2SignalQuality PrismDID = 0x05041
+ PrismDIDType1Signal PrismDID = 0x60044
+ PrismDIDType2Signal PrismDID = 0x06041
+ PrismDIDType1Noise PrismDID = 0x70044
+ PrismDIDType2Noise PrismDID = 0x07041
+ PrismDIDType1Rate PrismDID = 0x80044
+ PrismDIDType2Rate PrismDID = 0x08041
+ PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044
+ PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041
+ PrismDIDType1FrameLength PrismDID = 0xA0044
+ PrismDIDType2FrameLength PrismDID = 0x0A041
+)
+
+const (
+ PrismType1MessageCode uint16 = 0x00000044
+ PrismType2MessageCode uint16 = 0x00000041
+)
+
+func (p PrismDID) String() string {
+ dids := map[PrismDID]string{
+ PrismDIDType1HostTime: "Host Time",
+ PrismDIDType2HostTime: "Host Time",
+ PrismDIDType1MACTime: "MAC Time",
+ PrismDIDType2MACTime: "MAC Time",
+ PrismDIDType1Channel: "Channel",
+ PrismDIDType2Channel: "Channel",
+ PrismDIDType1RSSI: "RSSI",
+ PrismDIDType2RSSI: "RSSI",
+ PrismDIDType1SignalQuality: "Signal Quality",
+ PrismDIDType2SignalQuality: "Signal Quality",
+ PrismDIDType1Signal: "Signal",
+ PrismDIDType2Signal: "Signal",
+ PrismDIDType1Noise: "Noise",
+ PrismDIDType2Noise: "Noise",
+ PrismDIDType1Rate: "Rate",
+ PrismDIDType2Rate: "Rate",
+ PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator",
+ PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator",
+ PrismDIDType1FrameLength: "Frame Length",
+ PrismDIDType2FrameLength: "Frame Length",
+ }
+
+ if str, ok := dids[p]; ok {
+ return str
+ }
+
+ return "Unknown DID"
+}
+
+type PrismValue struct {
+ DID PrismDID
+ Status uint16
+ Length uint16
+ Data []byte
+}
+
+func (pv *PrismValue) IsSupplied() bool {
+ return pv.Status == 1
+}
+
+var ErrPrismExpectedMoreData = errors.New("Expected more data.")
+var ErrPrismInvalidCode = errors.New("Invalid header code.")
+
+func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error {
+ d := &PrismHeader{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type PrismHeader struct {
+ BaseLayer
+ Code uint16
+ Length uint16
+ DeviceName string
+ Values []PrismValue
+}
+
+func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader }
+
+func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Code = binary.LittleEndian.Uint16(data[0:4])
+ m.Length = binary.LittleEndian.Uint16(data[4:8])
+ m.DeviceName = string(data[8:24])
+ m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:len(data)]}
+
+ switch m.Code {
+ case PrismType1MessageCode:
+ fallthrough
+ case PrismType2MessageCode:
+ // valid message code
+ default:
+ return ErrPrismInvalidCode
+ }
+
+ offset := uint16(24)
+
+ m.Values = make([]PrismValue, (m.Length-offset)/12)
+ for i := 0; i < len(m.Values); i++ {
+ decodePrismValue(data[offset:offset+12], &m.Values[i])
+ offset += 12
+ }
+
+ if offset != m.Length {
+ return ErrPrismExpectedMoreData
+ }
+
+ return nil
+}
+
+func (m *PrismHeader) CanDecode() gopacket.LayerClass { return LayerTypePrismHeader }
+func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/google/gopacket/layers/radiotap.go
new file mode 100644
index 0000000..17c6133
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/radiotap.go
@@ -0,0 +1,1069 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
+// align calculates the number of bytes needed to align with the width
+// on the offset, returning the number of bytes we need to skip to
+// align to the offset (width).
+func align(offset uint16, width uint16) uint16 {
+ return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset)
+}
+
+type RadioTapPresent uint32
+
+const (
+ RadioTapPresentTSFT RadioTapPresent = 1 << iota
+ RadioTapPresentFlags
+ RadioTapPresentRate
+ RadioTapPresentChannel
+ RadioTapPresentFHSS
+ RadioTapPresentDBMAntennaSignal
+ RadioTapPresentDBMAntennaNoise
+ RadioTapPresentLockQuality
+ RadioTapPresentTxAttenuation
+ RadioTapPresentDBTxAttenuation
+ RadioTapPresentDBMTxPower
+ RadioTapPresentAntenna
+ RadioTapPresentDBAntennaSignal
+ RadioTapPresentDBAntennaNoise
+ RadioTapPresentRxFlags
+ RadioTapPresentTxFlags
+ RadioTapPresentRtsRetries
+ RadioTapPresentDataRetries
+ _
+ RadioTapPresentMCS
+ RadioTapPresentAMPDUStatus
+ RadioTapPresentVHT
+ RadioTapPresentEXT RadioTapPresent = 1 << 31
+)
+
+func (r RadioTapPresent) TSFT() bool {
+ return r&RadioTapPresentTSFT != 0
+}
+func (r RadioTapPresent) Flags() bool {
+ return r&RadioTapPresentFlags != 0
+}
+func (r RadioTapPresent) Rate() bool {
+ return r&RadioTapPresentRate != 0
+}
+func (r RadioTapPresent) Channel() bool {
+ return r&RadioTapPresentChannel != 0
+}
+func (r RadioTapPresent) FHSS() bool {
+ return r&RadioTapPresentFHSS != 0
+}
+func (r RadioTapPresent) DBMAntennaSignal() bool {
+ return r&RadioTapPresentDBMAntennaSignal != 0
+}
+func (r RadioTapPresent) DBMAntennaNoise() bool {
+ return r&RadioTapPresentDBMAntennaNoise != 0
+}
+func (r RadioTapPresent) LockQuality() bool {
+ return r&RadioTapPresentLockQuality != 0
+}
+func (r RadioTapPresent) TxAttenuation() bool {
+ return r&RadioTapPresentTxAttenuation != 0
+}
+func (r RadioTapPresent) DBTxAttenuation() bool {
+ return r&RadioTapPresentDBTxAttenuation != 0
+}
+func (r RadioTapPresent) DBMTxPower() bool {
+ return r&RadioTapPresentDBMTxPower != 0
+}
+func (r RadioTapPresent) Antenna() bool {
+ return r&RadioTapPresentAntenna != 0
+}
+func (r RadioTapPresent) DBAntennaSignal() bool {
+ return r&RadioTapPresentDBAntennaSignal != 0
+}
+func (r RadioTapPresent) DBAntennaNoise() bool {
+ return r&RadioTapPresentDBAntennaNoise != 0
+}
+func (r RadioTapPresent) RxFlags() bool {
+ return r&RadioTapPresentRxFlags != 0
+}
+func (r RadioTapPresent) TxFlags() bool {
+ return r&RadioTapPresentTxFlags != 0
+}
+func (r RadioTapPresent) RtsRetries() bool {
+ return r&RadioTapPresentRtsRetries != 0
+}
+func (r RadioTapPresent) DataRetries() bool {
+ return r&RadioTapPresentDataRetries != 0
+}
+func (r RadioTapPresent) MCS() bool {
+ return r&RadioTapPresentMCS != 0
+}
+func (r RadioTapPresent) AMPDUStatus() bool {
+ return r&RadioTapPresentAMPDUStatus != 0
+}
+func (r RadioTapPresent) VHT() bool {
+ return r&RadioTapPresentVHT != 0
+}
+func (r RadioTapPresent) EXT() bool {
+ return r&RadioTapPresentEXT != 0
+}
+
+type RadioTapChannelFlags uint16
+
+const (
+ RadioTapChannelFlagsTurbo RadioTapChannelFlags = 0x0010 // Turbo channel
+ RadioTapChannelFlagsCCK RadioTapChannelFlags = 0x0020 // CCK channel
+ RadioTapChannelFlagsOFDM RadioTapChannelFlags = 0x0040 // OFDM channel
+ RadioTapChannelFlagsGhz2 RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel.
+ RadioTapChannelFlagsGhz5 RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel
+ RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed
+ RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel
+ RadioTapChannelFlagsGFSK RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY)
+)
+
+func (r RadioTapChannelFlags) Turbo() bool {
+ return r&RadioTapChannelFlagsTurbo != 0
+}
+func (r RadioTapChannelFlags) CCK() bool {
+ return r&RadioTapChannelFlagsCCK != 0
+}
+func (r RadioTapChannelFlags) OFDM() bool {
+ return r&RadioTapChannelFlagsOFDM != 0
+}
+func (r RadioTapChannelFlags) Ghz2() bool {
+ return r&RadioTapChannelFlagsGhz2 != 0
+}
+func (r RadioTapChannelFlags) Ghz5() bool {
+ return r&RadioTapChannelFlagsGhz5 != 0
+}
+func (r RadioTapChannelFlags) Passive() bool {
+ return r&RadioTapChannelFlagsPassive != 0
+}
+func (r RadioTapChannelFlags) Dynamic() bool {
+ return r&RadioTapChannelFlagsDynamic != 0
+}
+func (r RadioTapChannelFlags) GFSK() bool {
+ return r&RadioTapChannelFlagsGFSK != 0
+}
+
+// String provides a human readable string for RadioTapChannelFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapChannelFlags value, not its string.
+func (a RadioTapChannelFlags) String() string {
+ var out bytes.Buffer
+ if a.Turbo() {
+ out.WriteString("Turbo,")
+ }
+ if a.CCK() {
+ out.WriteString("CCK,")
+ }
+ if a.OFDM() {
+ out.WriteString("OFDM,")
+ }
+ if a.Ghz2() {
+ out.WriteString("Ghz2,")
+ }
+ if a.Ghz5() {
+ out.WriteString("Ghz5,")
+ }
+ if a.Passive() {
+ out.WriteString("Passive,")
+ }
+ if a.Dynamic() {
+ out.WriteString("Dynamic,")
+ }
+ if a.GFSK() {
+ out.WriteString("GFSK,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return ""
+}
+
+type RadioTapFlags uint8
+
+const (
+ RadioTapFlagsCFP RadioTapFlags = 1 << iota // sent/received during CFP
+ RadioTapFlagsShortPreamble // sent/received * with short * preamble
+ RadioTapFlagsWEP // sent/received * with WEP encryption
+ RadioTapFlagsFrag // sent/received * with fragmentation
+ RadioTapFlagsFCS // frame includes FCS
+ RadioTapFlagsDatapad // frame has padding between * 802.11 header and payload * (to 32-bit boundary)
+ RadioTapFlagsBadFCS // does not pass FCS check
+ RadioTapFlagsShortGI // HT short GI
+)
+
+func (r RadioTapFlags) CFP() bool {
+ return r&RadioTapFlagsCFP != 0
+}
+func (r RadioTapFlags) ShortPreamble() bool {
+ return r&RadioTapFlagsShortPreamble != 0
+}
+func (r RadioTapFlags) WEP() bool {
+ return r&RadioTapFlagsWEP != 0
+}
+func (r RadioTapFlags) Frag() bool {
+ return r&RadioTapFlagsFrag != 0
+}
+func (r RadioTapFlags) FCS() bool {
+ return r&RadioTapFlagsFCS != 0
+}
+func (r RadioTapFlags) Datapad() bool {
+ return r&RadioTapFlagsDatapad != 0
+}
+func (r RadioTapFlags) BadFCS() bool {
+ return r&RadioTapFlagsBadFCS != 0
+}
+func (r RadioTapFlags) ShortGI() bool {
+ return r&RadioTapFlagsShortGI != 0
+}
+
+// String provides a human readable string for RadioTapFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapFlags value, not its string.
+func (a RadioTapFlags) String() string {
+ var out bytes.Buffer
+ if a.CFP() {
+ out.WriteString("CFP,")
+ }
+ if a.ShortPreamble() {
+ out.WriteString("SHORT-PREAMBLE,")
+ }
+ if a.WEP() {
+ out.WriteString("WEP,")
+ }
+ if a.Frag() {
+ out.WriteString("FRAG,")
+ }
+ if a.FCS() {
+ out.WriteString("FCS,")
+ }
+ if a.Datapad() {
+ out.WriteString("DATAPAD,")
+ }
+ if a.ShortGI() {
+ out.WriteString("SHORT-GI,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return ""
+}
+
+type RadioTapRate uint8
+
+func (a RadioTapRate) String() string {
+ return fmt.Sprintf("%v Mb/s", 0.5*float32(a))
+}
+
+type RadioTapChannelFrequency uint16
+
+func (a RadioTapChannelFrequency) String() string {
+ return fmt.Sprintf("%d MHz", a)
+}
+
+type RadioTapRxFlags uint16
+
+const (
+ RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002
+)
+
+func (self RadioTapRxFlags) BadPlcp() bool {
+ return self&RadioTapRxFlagsBadPlcp != 0
+}
+
+func (self RadioTapRxFlags) String() string {
+ if self.BadPlcp() {
+ return "BADPLCP"
+ }
+ return ""
+}
+
+type RadioTapTxFlags uint16
+
+const (
+ RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota
+ RadioTapTxFlagsCTS
+ RadioTapTxFlagsRTS
+ RadioTapTxFlagsNoACK
+)
+
+func (self RadioTapTxFlags) Fail() bool { return self&RadioTapTxFlagsFail != 0 }
+func (self RadioTapTxFlags) CTS() bool { return self&RadioTapTxFlagsCTS != 0 }
+func (self RadioTapTxFlags) RTS() bool { return self&RadioTapTxFlagsRTS != 0 }
+func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 }
+
+func (self RadioTapTxFlags) String() string {
+ var tokens []string
+ if self.Fail() {
+ tokens = append(tokens, "Fail")
+ }
+ if self.CTS() {
+ tokens = append(tokens, "CTS")
+ }
+ if self.RTS() {
+ tokens = append(tokens, "RTS")
+ }
+ if self.NoACK() {
+ tokens = append(tokens, "NoACK")
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapMCS struct {
+ Known RadioTapMCSKnown
+ Flags RadioTapMCSFlags
+ MCS uint8
+}
+
+func (self RadioTapMCS) String() string {
+ var tokens []string
+ if self.Known.Bandwidth() {
+ token := "?"
+ switch self.Flags.Bandwidth() {
+ case 0:
+ token = "20"
+ case 1:
+ token = "40"
+ case 2:
+ token = "40(20L)"
+ case 3:
+ token = "40(20U)"
+ }
+ tokens = append(tokens, token)
+ }
+ if self.Known.MCSIndex() {
+ tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS))
+ }
+ if self.Known.GuardInterval() {
+ if self.Flags.ShortGI() {
+ tokens = append(tokens, fmt.Sprintf("shortGI"))
+ } else {
+ tokens = append(tokens, fmt.Sprintf("longGI"))
+ }
+ }
+ if self.Known.HTFormat() {
+ if self.Flags.Greenfield() {
+ tokens = append(tokens, fmt.Sprintf("HT-greenfield"))
+ } else {
+ tokens = append(tokens, fmt.Sprintf("HT-mixed"))
+ }
+ }
+ if self.Known.FECType() {
+ if self.Flags.FECLDPC() {
+ tokens = append(tokens, fmt.Sprintf("LDPC"))
+ } else {
+ tokens = append(tokens, fmt.Sprintf("BCC"))
+ }
+ }
+ if self.Known.STBC() {
+ tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC()))
+ }
+ if self.Known.NESS() {
+ num := 0
+ if self.Known.NESS1() {
+ num |= 0x02
+ }
+ if self.Flags.NESS0() {
+ num |= 0x01
+ }
+ tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapMCSKnown uint8
+
+const (
+ RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota
+ RadioTapMCSKnownMCSIndex
+ RadioTapMCSKnownGuardInterval
+ RadioTapMCSKnownHTFormat
+ RadioTapMCSKnownFECType
+ RadioTapMCSKnownSTBC
+ RadioTapMCSKnownNESS
+ RadioTapMCSKnownNESS1
+)
+
+func (self RadioTapMCSKnown) Bandwidth() bool { return self&RadioTapMCSKnownBandwidth != 0 }
+func (self RadioTapMCSKnown) MCSIndex() bool { return self&RadioTapMCSKnownMCSIndex != 0 }
+func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 }
+func (self RadioTapMCSKnown) HTFormat() bool { return self&RadioTapMCSKnownHTFormat != 0 }
+func (self RadioTapMCSKnown) FECType() bool { return self&RadioTapMCSKnownFECType != 0 }
+func (self RadioTapMCSKnown) STBC() bool { return self&RadioTapMCSKnownSTBC != 0 }
+func (self RadioTapMCSKnown) NESS() bool { return self&RadioTapMCSKnownNESS != 0 }
+func (self RadioTapMCSKnown) NESS1() bool { return self&RadioTapMCSKnownNESS1 != 0 }
+
+type RadioTapMCSFlags uint8
+
+const (
+ RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03
+ RadioTapMCSFlagsShortGI = 0x04
+ RadioTapMCSFlagsGreenfield = 0x08
+ RadioTapMCSFlagsFECLDPC = 0x10
+ RadioTapMCSFlagsSTBCMask = 0x60
+ RadioTapMCSFlagsNESS0 = 0x80
+)
+
+func (self RadioTapMCSFlags) Bandwidth() int {
+ return int(self & RadioTapMCSFlagsBandwidthMask)
+}
+func (self RadioTapMCSFlags) ShortGI() bool { return self&RadioTapMCSFlagsShortGI != 0 }
+func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 }
+func (self RadioTapMCSFlags) FECLDPC() bool { return self&RadioTapMCSFlagsFECLDPC != 0 }
+func (self RadioTapMCSFlags) STBC() int {
+ return int(self&RadioTapMCSFlagsSTBCMask) >> 5
+}
+func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 }
+
+type RadioTapAMPDUStatus struct {
+ Reference uint32
+ Flags RadioTapAMPDUStatusFlags
+ CRC uint8
+}
+
+func (self RadioTapAMPDUStatus) String() string {
+ tokens := []string{
+ fmt.Sprintf("ref#%x", self.Reference),
+ }
+ if self.Flags.ReportZerolen() && self.Flags.IsZerolen() {
+ tokens = append(tokens, fmt.Sprintf("zero-length"))
+ }
+ if self.Flags.LastKnown() && self.Flags.IsLast() {
+ tokens = append(tokens, "last")
+ }
+ if self.Flags.DelimCRCErr() {
+ tokens = append(tokens, "delimiter CRC error")
+ }
+ if self.Flags.DelimCRCKnown() {
+ tokens = append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapAMPDUStatusFlags uint16
+
+const (
+ RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota
+ RadioTapAMPDUIsZerolen
+ RadioTapAMPDULastKnown
+ RadioTapAMPDUIsLast
+ RadioTapAMPDUDelimCRCErr
+ RadioTapAMPDUDelimCRCKnown
+)
+
+func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool {
+ return self&RadioTapAMPDUStatusFlagsReportZerolen != 0
+}
+func (self RadioTapAMPDUStatusFlags) IsZerolen() bool { return self&RadioTapAMPDUIsZerolen != 0 }
+func (self RadioTapAMPDUStatusFlags) LastKnown() bool { return self&RadioTapAMPDULastKnown != 0 }
+func (self RadioTapAMPDUStatusFlags) IsLast() bool { return self&RadioTapAMPDUIsLast != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool { return self&RadioTapAMPDUDelimCRCErr != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool { return self&RadioTapAMPDUDelimCRCKnown != 0 }
+
+type RadioTapVHT struct {
+ Known RadioTapVHTKnown
+ Flags RadioTapVHTFlags
+ Bandwidth uint8
+ MCSNSS [4]RadioTapVHTMCSNSS
+ Coding uint8
+ GroupId uint8
+ PartialAID uint16
+}
+
+func (self RadioTapVHT) String() string {
+ var tokens []string
+ if self.Known.STBC() {
+ if self.Flags.STBC() {
+ tokens = append(tokens, "STBC")
+ } else {
+ tokens = append(tokens, "no STBC")
+ }
+ }
+ if self.Known.TXOPPSNotAllowed() {
+ if self.Flags.TXOPPSNotAllowed() {
+ tokens = append(tokens, "TXOP doze not allowed")
+ } else {
+ tokens = append(tokens, "TXOP doze allowed")
+ }
+ }
+ if self.Known.GI() {
+ if self.Flags.SGI() {
+ tokens = append(tokens, "short GI")
+ } else {
+ tokens = append(tokens, "long GI")
+ }
+ }
+ if self.Known.SGINSYMDisambiguation() {
+ if self.Flags.SGINSYMMod() {
+ tokens = append(tokens, "NSYM mod 10=9")
+ } else {
+ tokens = append(tokens, "NSYM mod 10!=9 or no short GI")
+ }
+ }
+ if self.Known.LDPCExtraOFDMSymbol() {
+ if self.Flags.LDPCExtraOFDMSymbol() {
+ tokens = append(tokens, "LDPC extra OFDM symbols")
+ } else {
+ tokens = append(tokens, "no LDPC extra OFDM symbols")
+ }
+ }
+ if self.Known.Beamformed() {
+ if self.Flags.Beamformed() {
+ tokens = append(tokens, "beamformed")
+ } else {
+ tokens = append(tokens, "no beamformed")
+ }
+ }
+ if self.Known.Bandwidth() {
+ token := "?"
+ switch self.Bandwidth & 0x1f {
+ case 0:
+ token = "20"
+ case 1:
+ token = "40"
+ case 2:
+ token = "40(20L)"
+ case 3:
+ token = "40(20U)"
+ case 4:
+ token = "80"
+ case 5:
+ token = "80(40L)"
+ case 6:
+ token = "80(40U)"
+ case 7:
+ token = "80(20LL)"
+ case 8:
+ token = "80(20LU)"
+ case 9:
+ token = "80(20UL)"
+ case 10:
+ token = "80(20UU)"
+ case 11:
+ token = "160"
+ case 12:
+ token = "160(80L)"
+ case 13:
+ token = "160(80U)"
+ case 14:
+ token = "160(40LL)"
+ case 15:
+ token = "160(40LU)"
+ case 16:
+ token = "160(40UL)"
+ case 17:
+ token = "160(40UU)"
+ case 18:
+ token = "160(20LLL)"
+ case 19:
+ token = "160(20LLU)"
+ case 20:
+ token = "160(20LUL)"
+ case 21:
+ token = "160(20LUU)"
+ case 22:
+ token = "160(20ULL)"
+ case 23:
+ token = "160(20ULU)"
+ case 24:
+ token = "160(20UUL)"
+ case 25:
+ token = "160(20UUU)"
+ }
+ tokens = append(tokens, token)
+ }
+ for i, MCSNSS := range self.MCSNSS {
+ if MCSNSS.Present() {
+ fec := "?"
+ switch self.Coding & (1 << uint8(i)) {
+ case 0:
+ fec = "BCC"
+ case 1:
+ fec = "LDPC"
+ }
+ tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec))
+ }
+ }
+ if self.Known.GroupId() {
+ tokens = append(tokens,
+ fmt.Sprintf("group=%d", self.GroupId))
+ }
+ if self.Known.PartialAID() {
+ tokens = append(tokens,
+ fmt.Sprintf("partial-AID=%d", self.PartialAID))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapVHTKnown uint16
+
+const (
+ RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota
+ RadioTapVHTKnownTXOPPSNotAllowed
+ RadioTapVHTKnownGI
+ RadioTapVHTKnownSGINSYMDisambiguation
+ RadioTapVHTKnownLDPCExtraOFDMSymbol
+ RadioTapVHTKnownBeamformed
+ RadioTapVHTKnownBandwidth
+ RadioTapVHTKnownGroupId
+ RadioTapVHTKnownPartialAID
+)
+
+func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 }
+func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool {
+ return self&RadioTapVHTKnownTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 }
+func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool {
+ return self&RadioTapVHTKnownSGINSYMDisambiguation != 0
+}
+func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool {
+ return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 }
+func (self RadioTapVHTKnown) Bandwidth() bool { return self&RadioTapVHTKnownBandwidth != 0 }
+func (self RadioTapVHTKnown) GroupId() bool { return self&RadioTapVHTKnownGroupId != 0 }
+func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 }
+
+// RadioTapVHTFlags is the one-byte VHT flags field; each accessor tests one bit.
+type RadioTapVHTFlags uint8
+
+const (
+ RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota
+ RadioTapVHTFlagsTXOPPSNotAllowed
+ RadioTapVHTFlagsSGI
+ RadioTapVHTFlagsSGINSYMMod
+ RadioTapVHTFlagsLDPCExtraOFDMSymbol
+ RadioTapVHTFlagsBeamformed
+)
+
+// STBC reports whether all spatial streams use space-time block coding.
+func (f RadioTapVHTFlags) STBC() bool { return f&RadioTapVHTFlagsSTBC != 0 }
+
+// TXOPPSNotAllowed reports whether the TXOP_PS_NOT_ALLOWED flag is set.
+func (f RadioTapVHTFlags) TXOPPSNotAllowed() bool {
+ return f&RadioTapVHTFlagsTXOPPSNotAllowed != 0
+}
+
+// SGI reports whether a short guard interval was used.
+func (f RadioTapVHTFlags) SGI() bool { return f&RadioTapVHTFlagsSGI != 0 }
+
+// SGINSYMMod reports whether the short-GI NSYM disambiguation flag is set.
+func (f RadioTapVHTFlags) SGINSYMMod() bool { return f&RadioTapVHTFlagsSGINSYMMod != 0 }
+
+// LDPCExtraOFDMSymbol reports whether an extra LDPC OFDM symbol is present.
+func (f RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool {
+ return f&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0
+}
+
+// Beamformed reports whether the frame was beamformed.
+func (f RadioTapVHTFlags) Beamformed() bool { return f&RadioTapVHTFlagsBeamformed != 0 }
+
+// RadioTapVHTMCSNSS packs one user's VHT rate information: the NSS in the
+// low nibble and the MCS index in the high nibble.
+type RadioTapVHTMCSNSS uint8
+
+// Present reports whether this user field carries data (nonzero NSS).
+func (mn RadioTapVHTMCSNSS) Present() bool {
+ return mn&0x0F != 0
+}
+
+// String renders the pair as e.g. "NSS#1MCS#7".
+func (mn RadioTapVHTMCSNSS) String() string {
+ nss, mcs := uint32(mn&0xf), uint32(mn>>4)
+ return fmt.Sprintf("NSS#%dMCS#%d", nss, mcs)
+}
+
+// decodeRadioTap is the gopacket decoder hook for LayerTypeRadioTap; it
+// delegates to the generic DecodingLayer machinery.
+func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error {
+ d := &RadioTap{}
+ // TODO: Should we set LinkLayer here? And implement LinkFlow
+ return decodingLayerDecoder(d, data, p)
+}
+
+// RadioTap is the radiotap pseudo-header prepended to captured 802.11
+// frames; which of the fields below are meaningful is governed by Present.
+type RadioTap struct {
+ BaseLayer
+
+ // Version 0. Only increases for drastic changes, introduction of compatible new fields does not count.
+ Version uint8
+ // Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields.
+ Length uint16
+ // Present is a bitmap telling which fields are present. Set bit 31 (0x80000000) to extend the bitmap by another 32 bits. Additional extensions are made by setting bit 31.
+ Present RadioTapPresent
+ // TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames, only.
+ TSFT uint64
+ Flags RadioTapFlags
+ // Rate Tx/Rx data rate
+ Rate RadioTapRate
+ // ChannelFrequency Tx/Rx frequency in MHz, followed by flags
+ ChannelFrequency RadioTapChannelFrequency
+ ChannelFlags RadioTapChannelFlags
+ // FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte).
+ FHSS uint16
+ // DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt.
+ DBMAntennaSignal int8
+ // DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt.
+ DBMAntennaNoise int8
+ // LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets.
+ LockQuality uint16
+ // TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels.
+ TxAttenuation uint16
+ // DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels.
+ DBTxAttenuation uint16
+ // DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port.
+ DBMTxPower int8
+ // Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0.
+ Antenna uint8
+ // DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference.
+ DBAntennaSignal uint8
+ // DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point.
+ DBAntennaNoise uint8
+ // Remaining fields: flags and per-technology rate information (MCS for
+ // 802.11n, VHT for 802.11ac) plus A-MPDU status and retry counters.
+ RxFlags RadioTapRxFlags
+ TxFlags RadioTapTxFlags
+ RtsRetries uint8
+ DataRetries uint8
+ MCS RadioTapMCS
+ AMPDUStatus RadioTapAMPDUStatus
+ VHT RadioTapVHT
+}
+
+// LayerType returns LayerTypeRadioTap.
+func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap }
+
+// DecodeFromBytes parses the radiotap header out of data. Fields appear in
+// bitmap order, are little-endian, and each multi-byte field is naturally
+// aligned relative to the start of the header (hence the align() calls).
+// NOTE(review): no bounds checking is performed; a truncated capture would
+// panic rather than return an error — confirm callers guarantee length.
+func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Version = uint8(data[0])
+ m.Length = binary.LittleEndian.Uint16(data[2:4])
+ m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8]))
+
+ offset := uint16(4)
+
+ for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 {
+ // This parser only handles standard radiotap namespace,
+ // and expects all fields are packed in the first it_present.
+ // Extended bitmap will be just ignored.
+ offset += 4
+ }
+ offset += 4 // skip the bitmap
+
+ if m.Present.TSFT() {
+ offset += align(offset, 8)
+ m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8])
+ offset += 8
+ }
+ if m.Present.Flags() {
+ m.Flags = RadioTapFlags(data[offset])
+ offset++
+ }
+ if m.Present.Rate() {
+ m.Rate = RadioTapRate(data[offset])
+ offset++
+ }
+ if m.Present.Channel() {
+ offset += align(offset, 2)
+ m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2]))
+ offset += 2
+ m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2]))
+ offset += 2
+ }
+ if m.Present.FHSS() {
+ m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBMAntennaSignal() {
+ m.DBMAntennaSignal = int8(data[offset])
+ offset++
+ }
+ if m.Present.DBMAntennaNoise() {
+ m.DBMAntennaNoise = int8(data[offset])
+ offset++
+ }
+ if m.Present.LockQuality() {
+ offset += align(offset, 2)
+ m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.TxAttenuation() {
+ offset += align(offset, 2)
+ m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBTxAttenuation() {
+ offset += align(offset, 2)
+ m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBMTxPower() {
+ m.DBMTxPower = int8(data[offset])
+ offset++
+ }
+ if m.Present.Antenna() {
+ m.Antenna = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DBAntennaSignal() {
+ m.DBAntennaSignal = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DBAntennaNoise() {
+ m.DBAntennaNoise = uint8(data[offset])
+ offset++
+ }
+ if m.Present.RxFlags() {
+ offset += align(offset, 2)
+ m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:]))
+ offset += 2
+ }
+ if m.Present.TxFlags() {
+ offset += align(offset, 2)
+ m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:]))
+ offset += 2
+ }
+ if m.Present.RtsRetries() {
+ m.RtsRetries = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DataRetries() {
+ m.DataRetries = uint8(data[offset])
+ offset++
+ }
+ if m.Present.MCS() {
+ // MCS field: known bitmap, flags, MCS index (one byte each).
+ m.MCS = RadioTapMCS{
+ RadioTapMCSKnown(data[offset]),
+ RadioTapMCSFlags(data[offset+1]),
+ uint8(data[offset+2]),
+ }
+ offset += 3
+ }
+ if m.Present.AMPDUStatus() {
+ offset += align(offset, 4)
+ m.AMPDUStatus = RadioTapAMPDUStatus{
+ Reference: binary.LittleEndian.Uint32(data[offset:]),
+ Flags: RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])),
+ CRC: uint8(data[offset+6]),
+ }
+ offset += 8
+ }
+ if m.Present.VHT() {
+ offset += align(offset, 2)
+ m.VHT = RadioTapVHT{
+ Known: RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])),
+ Flags: RadioTapVHTFlags(data[offset+2]),
+ Bandwidth: uint8(data[offset+3]),
+ MCSNSS: [4]RadioTapVHTMCSNSS{
+ RadioTapVHTMCSNSS(data[offset+4]),
+ RadioTapVHTMCSNSS(data[offset+5]),
+ RadioTapVHTMCSNSS(data[offset+6]),
+ RadioTapVHTMCSNSS(data[offset+7]),
+ },
+ Coding: uint8(data[offset+8]),
+ GroupId: uint8(data[offset+9]),
+ PartialAID: binary.LittleEndian.Uint16(data[offset+10:]),
+ }
+ offset += 12
+ }
+
+ payload := data[m.Length:]
+
+ // Remove non standard padding used by some Wi-Fi drivers
+ if m.Flags.Datapad() &&
+ payload[0]&0xC == 0x8 { //&& // Data frame
+ headlen := 24
+ if payload[0]&0x8C == 0x88 { // QoS
+ headlen += 2
+ }
+ if payload[1]&0x3 == 0x3 { // 4 addresses
+ headlen += 2
+ }
+ if headlen%4 == 2 {
+ // Drop the 2 driver-inserted padding bytes after the 802.11 header.
+ payload = append(payload[:headlen], payload[headlen+2:len(payload)]...)
+ }
+ }
+
+ if !m.Flags.FCS() {
+ // Dot11.DecodeFromBytes() expects FCS present and performs a hard chop on the checksum
+ // If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+ // may extend beyond the len, rather than expecting callers to enforce cap==len on every packet
+ // we take the hit in this one case and do a reallocation. If the user DOES enforce cap==len
+ // then the reallocation will happen anyway on the append. This is required because the append
+ // write to the memory directly after the payload if there is sufficient capacity, which callers
+ // may not expect.
+ reallocPayload := make([]byte, len(payload)+4)
+ copy(reallocPayload[0:len(payload)], payload)
+ h := crc32.NewIEEE()
+ h.Write(payload)
+ binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+ payload = reallocPayload
+ }
+ m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+ return nil
+}
+
+// SerializeTo writes the radiotap header in front of the current buffer
+// contents, mirroring DecodeFromBytes: fields are emitted in bitmap order,
+// little-endian, with natural alignment from the start of the header.
+// NOTE(review): the header is staged in a fixed 1024-byte scratch buffer;
+// if FixLengths is unset, the Length field written at buf[2:4] is whatever
+// m.Length holds and may disagree with the bytes actually prepended.
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf := make([]byte, 1024)
+
+ buf[0] = m.Version
+ buf[1] = 0
+
+ binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+ offset := uint16(4)
+
+ // Skip over any extended presence bitmaps (bit 31 set), as in decode.
+ for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+ offset += 4
+ }
+
+ offset += 4
+
+ if m.Present.TSFT() {
+ offset += align(offset, 8)
+ binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+ offset += 8
+ }
+
+ if m.Present.Flags() {
+ buf[offset] = uint8(m.Flags)
+ offset++
+ }
+
+ if m.Present.Rate() {
+ buf[offset] = uint8(m.Rate)
+ offset++
+ }
+
+ if m.Present.Channel() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+ offset += 2
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+ offset += 2
+ }
+
+ if m.Present.FHSS() {
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+ offset += 2
+ }
+
+ if m.Present.DBMAntennaSignal() {
+ buf[offset] = byte(m.DBMAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBMAntennaNoise() {
+ buf[offset] = byte(m.DBMAntennaNoise)
+ offset++
+ }
+
+ if m.Present.LockQuality() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+ offset += 2
+ }
+
+ if m.Present.TxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBTxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBMTxPower() {
+ buf[offset] = byte(m.DBMTxPower)
+ offset++
+ }
+
+ if m.Present.Antenna() {
+ buf[offset] = uint8(m.Antenna)
+ offset++
+ }
+
+ if m.Present.DBAntennaSignal() {
+ buf[offset] = uint8(m.DBAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBAntennaNoise() {
+ buf[offset] = uint8(m.DBAntennaNoise)
+ offset++
+ }
+
+ if m.Present.RxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+ offset += 2
+ }
+
+ if m.Present.TxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+ offset += 2
+ }
+
+ if m.Present.RtsRetries() {
+ buf[offset] = m.RtsRetries
+ offset++
+ }
+
+ if m.Present.DataRetries() {
+ buf[offset] = m.DataRetries
+ offset++
+ }
+
+ if m.Present.MCS() {
+ buf[offset] = uint8(m.MCS.Known)
+ buf[offset+1] = uint8(m.MCS.Flags)
+ buf[offset+2] = uint8(m.MCS.MCS)
+
+ offset += 3
+ }
+
+ if m.Present.AMPDUStatus() {
+ offset += align(offset, 4)
+
+ binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference)
+ binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags))
+
+ buf[offset+6] = m.AMPDUStatus.CRC
+
+ offset += 8
+ }
+
+ if m.Present.VHT() {
+ offset += align(offset, 2)
+
+ binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known))
+
+ buf[offset+2] = uint8(m.VHT.Flags)
+ buf[offset+3] = uint8(m.VHT.Bandwidth)
+ buf[offset+4] = uint8(m.VHT.MCSNSS[0])
+ buf[offset+5] = uint8(m.VHT.MCSNSS[1])
+ buf[offset+6] = uint8(m.VHT.MCSNSS[2])
+ buf[offset+7] = uint8(m.VHT.MCSNSS[3])
+ buf[offset+8] = uint8(m.VHT.Coding)
+ buf[offset+9] = uint8(m.VHT.GroupId)
+
+ binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID)
+
+ offset += 12
+ }
+
+ packetBuf, err := b.PrependBytes(int(offset))
+
+ if err != nil {
+ return err
+ }
+
+ if opts.FixLengths {
+ m.Length = offset
+ }
+
+ binary.LittleEndian.PutUint16(buf[2:4], m.Length)
+
+ copy(packetBuf, buf)
+
+ return nil
+}
+
+// CanDecode returns the set of layer types this DecodingLayer can decode.
+func (m *RadioTap) CanDecode() gopacket.LayerClass { return LayerTypeRadioTap }
+// NextLayerType returns the layer type expected after the radiotap header.
+func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/google/gopacket/layers/rudp.go
new file mode 100644
index 0000000..8435129
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rudp.go
@@ -0,0 +1,93 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// RUDP is the Reliable UDP transport layer
+// (draft-ietf-sigtran-reliable-udp-00 style header).
+type RUDP struct {
+ BaseLayer
+ SYN, ACK, EACK, RST, NUL bool
+ Version uint8
+ HeaderLength uint8
+ SrcPort, DstPort RUDPPort
+ DataLength uint16
+ Seq, Ack, Checksum uint32
+ VariableHeaderArea []byte
+ // RUDPHeaderSyn contains SYN information for the RUDP packet,
+ // if the SYN flag is set
+ *RUDPHeaderSYN
+ // RUDPHeaderEack contains EACK information for the RUDP packet,
+ // if the EACK flag is set.
+ *RUDPHeaderEACK
+}
+
+// RUDPHeaderSYN carries the connection parameters from a SYN segment.
+type RUDPHeaderSYN struct {
+ MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16
+}
+
+// RUDPHeaderEACK carries the out-of-sequence acknowledgements from an EACK segment.
+type RUDPHeaderEACK struct {
+ SeqsReceivedOK []uint32
+}
+
+// LayerType returns gopacket.LayerTypeRUDP.
+func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP }
+
+// decodeRUDP parses the fixed 18-byte RUDP header, then interprets the
+// variable header area as SYN or EACK data depending on the flag bits.
+// HeaderLength is expressed in 16-bit words (hence the *2 below).
+func decodeRUDP(data []byte, p gopacket.PacketBuilder) error {
+ r := &RUDP{
+ SYN: data[0]&0x80 != 0,
+ ACK: data[0]&0x40 != 0,
+ EACK: data[0]&0x20 != 0,
+ RST: data[0]&0x10 != 0,
+ NUL: data[0]&0x08 != 0,
+ Version: data[0] & 0x3,
+ HeaderLength: data[1],
+ SrcPort: RUDPPort(data[2]),
+ DstPort: RUDPPort(data[3]),
+ DataLength: binary.BigEndian.Uint16(data[4:6]),
+ Seq: binary.BigEndian.Uint32(data[6:10]),
+ Ack: binary.BigEndian.Uint32(data[10:14]),
+ Checksum: binary.BigEndian.Uint32(data[14:18]),
+ }
+ if r.HeaderLength < 9 {
+ return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength)
+ }
+ hlen := int(r.HeaderLength) * 2
+ r.Contents = data[:hlen]
+ r.Payload = data[hlen : hlen+int(r.DataLength)]
+ r.VariableHeaderArea = data[18:hlen]
+ headerData := r.VariableHeaderArea
+ switch {
+ case r.SYN:
+ if len(headerData) != 6 {
+ return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData))
+ }
+ r.RUDPHeaderSYN = &RUDPHeaderSYN{
+ MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]),
+ MaxSegmentSize: binary.BigEndian.Uint16(headerData[2:4]),
+ OptionFlags: binary.BigEndian.Uint16(headerData[4:6]),
+ }
+ case r.EACK:
+ if len(headerData)%4 != 0 {
+ return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData))
+ }
+ r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)}
+ for i := 0; i < len(headerData); i += 4 {
+ r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4])
+ }
+ }
+ p.AddLayer(r)
+ p.SetTransportLayer(r)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// TransportFlow returns a flow based on the one-byte RUDP port numbers.
+func (r *RUDP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)})
+}
diff --git a/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/google/gopacket/layers/sctp.go
new file mode 100644
index 0000000..511176e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sctp.go
@@ -0,0 +1,746 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+
+ "github.com/google/gopacket"
+)
+
+// SCTP contains information on the top level of an SCTP packet.
+type SCTP struct {
+ BaseLayer
+ SrcPort, DstPort SCTPPort
+ VerificationTag uint32
+ Checksum uint32
+ sPort, dPort []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTP
+func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP }
+
+func decodeSCTP(data []byte, p gopacket.PacketBuilder) error {
+ sctp := &SCTP{}
+ err := sctp.DecodeFromBytes(data, p)
+ p.AddLayer(sctp)
+ p.SetTransportLayer(sctp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(sctpChunkTypePrefixDecoder)
+}
+
+var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)
+
+// TransportFlow returns a flow based on the source and destination SCTP port.
+func (s *SCTP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort)
+}
+
+func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error {
+ chunkType := SCTPChunkType(data[0])
+ return chunkType.Decode(data, p)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// It prepends the 12-byte SCTP common header; ports and tag are big-endian,
+// while the CRC32c checksum is stored little-endian (byte order of the
+// CRC32c value per RFC 4960 Appendix B conventions used here).
+func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(12)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort))
+ binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag)
+ if opts.ComputeChecksums {
+ // Note: MakeTable(Castagnoli) actually only creates the table once, then
+ // passes back a singleton on every other call, so this shouldn't cause
+ // excessive memory allocation.
+ binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli)))
+ }
+ return nil
+}
+
+// DecodeFromBytes parses the 12-byte SCTP common header; the chunk area is
+// left in Payload for the chunk decoders.
+func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ return errors.New("Invalid SCTP common header length")
+ }
+ sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2]))
+ sctp.sPort = data[:2]
+ sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4]))
+ sctp.dPort = data[2:4]
+ sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8])
+ sctp.Checksum = binary.BigEndian.Uint32(data[8:12])
+ sctp.BaseLayer = BaseLayer{data[:12], data[12:]}
+
+ return nil
+}
+
+// CanDecode returns the set of layer types this DecodingLayer can decode.
+func (t *SCTP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSCTP
+}
+
+// NextLayerType returns the layer type expected after the common header.
+func (t *SCTP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// SCTPChunk contains the common fields in all SCTP chunks.
+type SCTPChunk struct {
+ BaseLayer
+ Type SCTPChunkType
+ Flags uint8
+ Length uint16
+ // ActualLength is the total length of an SCTP chunk, including padding.
+ // SCTP chunks start and end on 4-byte boundaries. So if a chunk has a length
+ // of 18, it means that it has data up to and including byte 18, then padding
+ // up to the next 4-byte boundary, 20. In this case, Length would be 18, and
+ // ActualLength would be 20.
+ ActualLength int
+}
+
+// roundUpToNearest4 pads i up to the next multiple of 4; SCTP chunks and
+// parameters are 4-byte aligned on the wire.
+func roundUpToNearest4(i int) int {
+ if rem := i % 4; rem != 0 {
+  return i + 4 - rem
+ }
+ return i
+}
+
+// decodeSCTPChunk parses the 4-byte chunk header common to all chunk types
+// and computes the padded (4-byte aligned) ActualLength.
+func decodeSCTPChunk(data []byte) (SCTPChunk, error) {
+ length := binary.BigEndian.Uint16(data[2:4])
+ if length < 4 {
+ return SCTPChunk{}, errors.New("invalid SCTP chunk length")
+ }
+ actual := roundUpToNearest4(int(length))
+ ct := SCTPChunkType(data[0])
+
+ // For SCTP Data, use a separate layer for the payload
+ // (the DATA chunk header is a fixed 16 bytes; the user data beyond it,
+ // minus wire padding, becomes this layer's Payload).
+ delta := 0
+ if ct == SCTPChunkTypeData {
+ delta = int(actual) - int(length)
+ actual = 16
+ }
+
+ return SCTPChunk{
+ Type: ct,
+ Flags: data[1],
+ Length: length,
+ ActualLength: actual,
+ BaseLayer: BaseLayer{data[:actual], data[actual : len(data)-delta]},
+ }, nil
+}
+
+// SCTPParameter is a TLV parameter inside a SCTPChunk.
+type SCTPParameter struct {
+ Type uint16
+ Length uint16
+ // ActualLength is Length rounded up to the 4-byte wire alignment.
+ ActualLength int
+ Value []byte
+}
+
+// decodeSCTPParameter parses one TLV parameter; Length covers the 4-byte
+// type/length header plus the value, so Value spans data[4:length].
+func decodeSCTPParameter(data []byte) SCTPParameter {
+ length := binary.BigEndian.Uint16(data[2:4])
+ return SCTPParameter{
+ Type: binary.BigEndian.Uint16(data[0:2]),
+ Length: length,
+ Value: data[4:length],
+ ActualLength: roundUpToNearest4(int(length)),
+ }
+}
+
+// Bytes serializes the parameter back to its padded wire form.
+func (p SCTPParameter) Bytes() []byte {
+ length := 4 + len(p.Value)
+ data := make([]byte, roundUpToNearest4(length))
+ binary.BigEndian.PutUint16(data[0:2], p.Type)
+ binary.BigEndian.PutUint16(data[2:4], uint16(length))
+ copy(data[4:], p.Value)
+ return data
+}
+
+// SCTPUnknownChunkType is the layer type returned when we don't recognize the
+// chunk type. Since there's a length in a known location, we can skip over
+// it even if we don't know what it is, and continue parsing the rest of the
+// chunks. This chunk is stored as an ErrorLayer in the packet.
+type SCTPUnknownChunkType struct {
+ SCTPChunk
+ bytes []byte
+}
+
+// decodeSCTPChunkTypeUnknown records the raw chunk bytes and continues with
+// the next chunk, marking this one as the packet's error layer.
+func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPUnknownChunkType{SCTPChunk: chunk}
+ sc.bytes = data[:sc.ActualLength]
+ p.AddLayer(sc)
+ p.SetErrorLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// The chunk is re-emitted verbatim from the bytes captured at decode time.
+func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(s.ActualLength)
+ if err != nil {
+ return err
+ }
+ copy(bytes, s.bytes)
+ return nil
+}
+
+// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
+func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }
+
+// Payload returns all bytes in this header, including the decoded Type, Length,
+// and Flags.
+func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }
+
+// Error implements ErrorLayer.
+func (s *SCTPUnknownChunkType) Error() error {
+ return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
+}
+
+// SCTPData is the SCTP Data chunk layer.
+type SCTPData struct {
+ SCTPChunk
+ Unordered, BeginFragment, EndFragment bool
+ TSN uint32
+ StreamId uint16
+ StreamSequence uint16
+ PayloadProtocol SCTPPayloadProtocol
+}
+
+// LayerType returns gopacket.LayerTypeSCTPData.
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }
+
+// SCTPPayloadProtocol represents a payload protocol
+type SCTPPayloadProtocol uint32
+
+// SCTPPayloadProtocol constants from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml
+const (
+ SCTPProtocolReserved SCTPPayloadProtocol = 0
+ SCTPPayloadUIA = 1
+ SCTPPayloadM2UA = 2
+ SCTPPayloadM3UA = 3
+ SCTPPayloadSUA = 4
+ SCTPPayloadM2PA = 5
+ SCTPPayloadV5UA = 6
+ SCTPPayloadH248 = 7
+ SCTPPayloadBICC = 8
+ SCTPPayloadTALI = 9
+ SCTPPayloadDUA = 10
+ SCTPPayloadASAP = 11
+ SCTPPayloadENRP = 12
+ SCTPPayloadH323 = 13
+ SCTPPayloadQIPC = 14
+ SCTPPayloadSIMCO = 15
+ SCTPPayloadDDPSegment = 16
+ SCTPPayloadDDPStream = 17
+ SCTPPayloadS1AP = 18
+)
+
+func (p SCTPPayloadProtocol) String() string {
+ switch p {
+ case SCTPProtocolReserved:
+ return "Reserved"
+ case SCTPPayloadUIA:
+ return "UIA"
+ case SCTPPayloadM2UA:
+ return "M2UA"
+ case SCTPPayloadM3UA:
+ return "M3UA"
+ case SCTPPayloadSUA:
+ return "SUA"
+ case SCTPPayloadM2PA:
+ return "M2PA"
+ case SCTPPayloadV5UA:
+ return "V5UA"
+ case SCTPPayloadH248:
+ return "H.248"
+ case SCTPPayloadBICC:
+ return "BICC"
+ case SCTPPayloadTALI:
+ return "TALI"
+ case SCTPPayloadDUA:
+ return "DUA"
+ case SCTPPayloadASAP:
+ return "ASAP"
+ case SCTPPayloadENRP:
+ return "ENRP"
+ case SCTPPayloadH323:
+ return "H.323"
+ case SCTPPayloadQIPC:
+ return "QIPC"
+ case SCTPPayloadSIMCO:
+ return "SIMCO"
+ case SCTPPayloadDDPSegment:
+ return "DDPSegment"
+ case SCTPPayloadDDPStream:
+ return "DDPStream"
+ case SCTPPayloadS1AP:
+ return "S1AP"
+ }
+ return fmt.Sprintf("Unknown(%d)", p)
+}
+
+// decodeSCTPData parses a DATA chunk's fixed 16-byte header; the flag bits
+// in data[1] carry the unordered/begin/end-fragment markers.
+func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPData{
+ SCTPChunk: chunk,
+ Unordered: data[1]&0x4 != 0,
+ BeginFragment: data[1]&0x2 != 0,
+ EndFragment: data[1]&0x1 != 0,
+ TSN: binary.BigEndian.Uint32(data[4:8]),
+ StreamId: binary.BigEndian.Uint16(data[8:10]),
+ StreamSequence: binary.BigEndian.Uint16(data[10:12]),
+ PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])),
+ }
+ // Length is the length in bytes of the data, INCLUDING the 16-byte header.
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// It pads the current buffer contents (the user data) to a 32-bit boundary
+// and prepends the 16-byte DATA chunk header. The Length field written on
+// the wire excludes the padding but includes the header, per RFC 4960.
+func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ payload := b.Bytes()
+ // Pad the payload to a 32 bit boundary
+ if rem := len(payload) % 4; rem != 0 {
+ // The error from AppendBytes was previously discarded; a failed
+ // append would silently produce a malformed chunk.
+ if _, err := b.AppendBytes(4 - rem); err != nil {
+ return err
+ }
+ }
+ length := 16
+ bytes, err := b.PrependBytes(length)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ flags := uint8(0)
+ if sc.Unordered {
+ flags |= 0x4
+ }
+ if sc.BeginFragment {
+ flags |= 0x2
+ }
+ if sc.EndFragment {
+ flags |= 0x1
+ }
+ bytes[1] = flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload)))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.TSN)
+ binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId)
+ binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence)
+ binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol))
+ return nil
+}
+
+// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet.
+type SCTPInitParameter SCTPParameter
+
+// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck
+// messages.
+type SCTPInit struct {
+ SCTPChunk
+ InitiateTag uint32
+ AdvertisedReceiverWindowCredit uint32
+ OutboundStreams, InboundStreams uint16
+ InitialTSN uint32
+ Parameters []SCTPInitParameter
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck.
+func (sc *SCTPInit) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeInitAck {
+ return LayerTypeSCTPInitAck
+ }
+ // sc.Type == SCTPChunkTypeInit
+ return LayerTypeSCTPInit
+}
+
+// decodeSCTPInit parses an INIT/INIT-ACK chunk: a fixed 20-byte header
+// followed by optional TLV parameters up to the padded chunk length.
+func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPInit{
+ SCTPChunk: chunk,
+ InitiateTag: binary.BigEndian.Uint32(data[4:8]),
+ AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+ OutboundStreams: binary.BigEndian.Uint16(data[12:14]),
+ InboundStreams: binary.BigEndian.Uint16(data[14:16]),
+ InitialTSN: binary.BigEndian.Uint32(data[16:20]),
+ }
+ paramData := data[20:sc.ActualLength]
+ for len(paramData) > 0 {
+ p := SCTPInitParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// It re-serializes the TLV parameters, then prepends the 20-byte INIT
+// header; the Length field excludes the final padding.
+func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 20 + len(payload)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
+ binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
+ binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
+ copy(bytes[20:], payload)
+ return nil
+}
+
+// SCTPSack is the SCTP Selective ACK chunk layer.
+type SCTPSack struct {
+ SCTPChunk
+ CumulativeTSNAck uint32
+ AdvertisedReceiverWindowCredit uint32
+ NumGapACKs, NumDuplicateTSNs uint16
+ GapACKs []uint16
+ DuplicateTSNs []uint32
+}
+
+// LayerType return LayerTypeSCTPSack
+func (sc *SCTPSack) LayerType() gopacket.LayerType {
+ return LayerTypeSCTPSack
+}
+
+// decodeSCTPSack parses a SACK chunk: a 16-byte fixed header followed by
+// NumGapACKs 2-byte gap blocks and NumDuplicateTSNs 4-byte TSNs.
+func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPSack{
+ SCTPChunk: chunk,
+ CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+ AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+ NumGapACKs: binary.BigEndian.Uint16(data[12:14]),
+ NumDuplicateTSNs: binary.BigEndian.Uint16(data[14:16]),
+ }
+ // We bound gapAcks and dupTSNs here so we're not allocating tons
+ // of memory based on a user-controllable field. Our maximums are not exact,
+ // but should give us sane defaults... we'll still hit slice boundaries and
+ // fail if the user-supplied values are too high (in the for loops below), but
+ // the amount of memory we'll have allocated because of that should be small
+ // (< sc.ActualLength)
+ gapAcks := sc.SCTPChunk.ActualLength / 2
+ dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
+ if gapAcks > int(sc.NumGapACKs) {
+ gapAcks = int(sc.NumGapACKs)
+ }
+ if dupTSNs > int(sc.NumDuplicateTSNs) {
+ dupTSNs = int(sc.NumDuplicateTSNs)
+ }
+ sc.GapACKs = make([]uint16, 0, gapAcks)
+ sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
+ bytesRemaining := data[16:]
+ for i := 0; i < int(sc.NumGapACKs); i++ {
+ sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
+ bytesRemaining = bytesRemaining[2:]
+ }
+ for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
+ sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
+ bytesRemaining = bytesRemaining[4:]
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// The gap ACK / duplicate TSN counts are derived from the slice lengths,
+// not from the decoded NumGapACKs/NumDuplicateTSNs fields.
+func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
+ binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
+ for i, v := range sc.GapACKs {
+ binary.BigEndian.PutUint16(bytes[16+i*2:], v)
+ }
+ offset := 16 + 2*len(sc.GapACKs)
+ for i, v := range sc.DuplicateTSNs {
+ binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
+ }
+ return nil
+}
+
+// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
+// heartbeat ack layers.
+type SCTPHeartbeatParameter SCTPParameter
+
+// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat ack.
+type SCTPHeartbeat struct {
+ SCTPChunk
+ Parameters []SCTPHeartbeatParameter
+}
+
+// LayerType returns gopacket.LayerTypeSCTPHeartbeat.
+func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeHeartbeatAck {
+ return LayerTypeSCTPHeartbeatAck
+ }
+ // sc.Type == SCTPChunkTypeHeartbeat
+ return LayerTypeSCTPHeartbeat
+}
+
+// decodeSCTPHeartbeat parses a HEARTBEAT/HEARTBEAT-ACK chunk: the 4-byte
+// chunk header followed by TLV parameters up to the (unpadded) Length.
+func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPHeartbeat{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+// It re-serializes the heartbeat parameters behind a 4-byte chunk header;
+// the Length field excludes the final padding.
+func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 4 + len(payload)
+
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], payload)
+ return nil
+}
+
+// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
+type SCTPErrorParameter SCTPParameter
+
+// SCTPError is the SCTP error layer, also used for SCTP aborts.
+type SCTPError struct {
+ SCTPChunk
+ Parameters []SCTPErrorParameter
+}
+
+// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError.
+func (sc *SCTPError) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeAbort {
+ return LayerTypeSCTPAbort
+ }
+ // sc.Type == SCTPChunkTypeError
+ return LayerTypeSCTPError
+}
+
+func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
+ // remarkably similar to decodeSCTPHeartbeat ;)
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPError{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPErrorParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
// SerializeTo is for gopacket.SerializableLayer.
func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var payload []byte
	for _, param := range sc.Parameters {
		payload = append(payload, SCTPParameter(param).Bytes()...)
	}
	// Chunk length excludes padding: 4-byte header plus parameter bytes.
	length := 4 + len(payload)

	// The on-wire chunk is padded to a 4-byte boundary; the Length field
	// still records the unpadded size.
	bytes, err := b.PrependBytes(roundUpToNearest4(length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(sc.Type)
	bytes[1] = sc.Flags
	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
	copy(bytes[4:], payload)
	// NOTE(review): trailing pad bytes are left as returned by PrependBytes;
	// confirm they are zeroed, as RFC 4960 requires zero padding.
	return nil
}
+
// SCTPShutdown is the SCTP shutdown layer.
type SCTPShutdown struct {
	SCTPChunk
	// CumulativeTSNAck is the last consecutive TSN received from the peer.
	CumulativeTSNAck uint32
}

// LayerType returns gopacket.LayerTypeSCTPShutdown.
func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
+
// decodeSCTPShutdown decodes an SCTP shutdown chunk (4-byte header plus a
// 4-byte cumulative TSN ack), then continues decoding following chunks.
func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
	chunk, err := decodeSCTPChunk(data)
	if err != nil {
		return err
	}
	sc := &SCTPShutdown{
		SCTPChunk:        chunk,
		CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 8)
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ return nil
+}
+
// SCTPShutdownAck is the SCTP shutdown ack layer.
type SCTPShutdownAck struct {
	SCTPChunk
}

// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
+
// decodeSCTPShutdownAck decodes an SCTP shutdown ack chunk (header only),
// then continues decoding any chunks that follow in the packet.
func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
	chunk, err := decodeSCTPChunk(data)
	if err != nil {
		return err
	}
	sc := &SCTPShutdownAck{
		SCTPChunk: chunk,
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
+
// SCTPCookieEcho is the SCTP Cookie Echo layer.
type SCTPCookieEcho struct {
	SCTPChunk
	// Cookie is the opaque state cookie echoed back to the peer.
	Cookie []byte
}

// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
+
// decodeSCTPCookieEcho decodes an SCTP cookie echo chunk, then continues
// decoding any chunks that follow in the packet.
func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
	chunk, err := decodeSCTPChunk(data)
	if err != nil {
		return err
	}
	sc := &SCTPCookieEcho{
		SCTPChunk: chunk,
	}
	// The cookie is the whole chunk body. Note this aliases the input
	// buffer rather than copying it.
	sc.Cookie = data[4:sc.Length]
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
+
// SerializeTo is for gopacket.SerializableLayer.
func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	// Chunk length excludes padding: 4-byte header plus the cookie bytes.
	length := 4 + len(sc.Cookie)
	bytes, err := b.PrependBytes(roundUpToNearest4(length))
	if err != nil {
		return err
	}
	bytes[0] = uint8(sc.Type)
	bytes[1] = sc.Flags
	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
	copy(bytes[4:], sc.Cookie)
	// NOTE(review): trailing pad bytes are left as returned by PrependBytes;
	// confirm they are zeroed, as RFC 4960 requires zero padding.
	return nil
}
+
// SCTPEmptyLayer is used by all empty SCTP chunks (currently CookieAck and
// ShutdownComplete).
type SCTPEmptyLayer struct {
	SCTPChunk
}

// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
// LayerTypeSCTPCookieAck.
func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
	if sc.Type == SCTPChunkTypeShutdownComplete {
		return LayerTypeSCTPShutdownComplete
	}
	// sc.Type == SCTPChunkTypeCookieAck
	return LayerTypeSCTPCookieAck
}
+
// decodeSCTPEmptyLayer decodes a header-only SCTP chunk (cookie ack or
// shutdown complete), then continues decoding any chunks that follow.
func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
	chunk, err := decodeSCTPChunk(data)
	if err != nil {
		return err
	}
	sc := &SCTPEmptyLayer{
		SCTPChunk: chunk,
	}
	p.AddLayer(sc)
	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go
new file mode 100644
index 0000000..c56fe89
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sflow.go
@@ -0,0 +1,2480 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+This layer decodes SFlow version 5 datagrams.
+
+The specification can be found here: http://sflow.org/sflow_version_5.txt
+
+Additional developer information about sflow can be found at:
+http://sflow.org/developers/specifications.php
+
+And SFlow in general:
+http://sflow.org/index.php
+
+Two forms of sample data are defined: compact and expanded. The
+Specification has this to say:
+
+ Compact and expand forms of counter and flow samples are defined.
+ An agent must not mix compact/expanded encodings. If an agent
+ will never use ifIndex numbers >= 2^24 then it must use compact
+ encodings for all interfaces. Otherwise the expanded formats must
+ be used for all interfaces.
+
+This decoder only supports the compact form, because that is the only
one for which data was available.
+
+The datagram is composed of one or more samples of type flow or counter,
+and each sample is composed of one or more records describing the sample.
A sample is a single instance of sampled information, and each record in
the sample gives additional / supplementary information about the sample.
+
+The following sample record types are supported:
+
+ Raw Packet Header
+ opaque = flow_data; enterprise = 0; format = 1
+
+ Extended Switch Data
+ opaque = flow_data; enterprise = 0; format = 1001
+
+ Extended Router Data
+ opaque = flow_data; enterprise = 0; format = 1002
+
+ Extended Gateway Data
+ opaque = flow_data; enterprise = 0; format = 1003
+
+ Extended User Data
+ opaque = flow_data; enterprise = 0; format = 1004
+
+ Extended URL Data
+ opaque = flow_data; enterprise = 0; format = 1005
+
+The following types of counter records are supported:
+
+ Generic Interface Counters - see RFC 2233
+ opaque = counter_data; enterprise = 0; format = 1
+
+ Ethernet Interface Counters - see RFC 2358
+ opaque = counter_data; enterprise = 0; format = 2
+
+SFlow is encoded using XDR (RFC4506). There are a few places
+where the standard 4-byte fields are partitioned into two
+bitfields of different lengths. I'm not sure why the designers
+chose to pack together two values like this in some places, and
+in others they use the entire 4-byte value to store a number that
+will never be more than a few bits. In any case, there are a couple
+of types defined to handle the decoding of these bitfields, and
+that's why they're there. */
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
// SFlowRecord holds both flow sample records and counter sample records.
// A Record is the structure that actually holds the sampled data
// and / or counters.
//
// The interface is deliberately empty, so any type satisfies it; consumers
// are expected to type-switch on the concrete record type.
type SFlowRecord interface {
}
+
// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant
// 2 bits, and an SFlowSourceValue in its least significant 30 bits.
// These types and values define the meaning of the interface information
// presented in the sample metadata.
type SFlowDataSource int32
+
+func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) {
+ leftField := sdc >> 30
+ rightField := uint32(0x3FFFFFFF) & uint32(sdc)
+ return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
// SFlowDataSourceExpanded is the two-word (expanded) form of the sample
// source ID, used when interface indexes can exceed 2^24.
type SFlowDataSourceExpanded struct {
	SourceIDClass SFlowSourceFormat
	SourceIDIndex SFlowSourceValue
}

// decode extracts the format and value components from the stored words.
// NOTE(review): decodeCounterSample stores two raw 32-bit wire words here
// before calling decode, so the >>30 / &0x3FFFFFFF masking mirrors the
// compact encoding; confirm this is intended for the expanded encoding,
// where each word appears to hold its value directly.
func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) {
	leftField := sdce.SourceIDClass >> 30
	rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex)
	return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
}
+
// SFlowSourceFormat is the 2-bit format component of a sample source ID.
type SFlowSourceFormat uint32

// SFlowSourceValue is the 30-bit value component of a sample source ID.
type SFlowSourceValue uint32

// Known source-ID formats, taken from the top 2 bits of an SFlowDataSource.
const (
	SFlowTypeSingleInterface      SFlowSourceFormat = 0
	SFlowTypePacketDiscarded      SFlowSourceFormat = 1
	SFlowTypeMultipleDestinations SFlowSourceFormat = 2
)

// sflowSourceFormatNames maps each known source format to a display name.
var sflowSourceFormatNames = map[SFlowSourceFormat]string{
	SFlowTypeSingleInterface:      "Single Interface",
	SFlowTypePacketDiscarded:      "Packet Discarded",
	SFlowTypeMultipleDestinations: "Multiple Destinations",
}

// String renders a human-readable name for the source format, or
// "UNKNOWN" for unrecognized values.
func (sdf SFlowSourceFormat) String() string {
	if name, ok := sflowSourceFormatNames[sdf]; ok {
		return name
	}
	return "UNKNOWN"
}
+
// decodeSFlow decodes an sFlow v5 datagram and registers it as both a
// packet layer and the application layer.
func decodeSFlow(data []byte, p gopacket.PacketBuilder) error {
	s := &SFlowDatagram{}
	err := s.DecodeFromBytes(data, p)
	if err != nil {
		return err
	}
	p.AddLayer(s)
	p.SetApplicationLayer(s)
	return nil
}
+
// SFlowDatagram is the outermost container which holds some basic information
// about the reporting agent, and holds at least one sample record.
type SFlowDatagram struct {
	BaseLayer

	DatagramVersion uint32 // sFlow version of the datagram
	AgentAddress    net.IP // IPv4 or IPv6 address of the agent
	SubAgentID      uint32
	SequenceNumber  uint32 // datagram sequence number
	AgentUptime     uint32 // agent uptime in milliseconds
	SampleCount     uint32 // number of samples carried in the datagram
	FlowSamples     []SFlowFlowSample
	CounterSamples  []SFlowCounterSample
}
+
+// An SFlow datagram's outer container has the following
+// structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sFlow version (2|4|5) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int IP version of the Agent (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Agent IP address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sub agent id |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int datagram sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int switch uptime in ms |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int n samples in datagram |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / n samples /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
// SFlowDataFormat encodes the EnterpriseID in the most
// significant 20 bits, and the SampleType in the least significant
// 12 bits.
type SFlowDataFormat uint32

// decode splits the packed word into the enterprise ID (top 20 bits) and
// the sample type (bottom 12 bits).
func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) {
	leftField := sdf >> 12
	rightField := uint32(0xFFF) & uint32(sdf)
	return SFlowEnterpriseID(leftField), SFlowSampleType(rightField)
}
+
// SFlowEnterpriseID is used to differentiate between the
// official SFlow standard and other, vendor-specific types of
// flow data. (Similar to SNMP's enterprise MIB OIDs.) Only the
// official SFlow enterprise ID is decoded here.
type SFlowEnterpriseID uint32

// SFlowStandard is the enterprise ID of the official sFlow standard.
const (
	SFlowStandard SFlowEnterpriseID = 0
)

// String renders a human-readable name for the enterprise ID, or an
// empty string for unrecognized IDs.
func (eid SFlowEnterpriseID) String() string {
	if eid == SFlowStandard {
		return "Standard SFlow"
	}
	return ""
}

// GetType reports the enterprise ID type; only the standard enterprise
// ID is supported, so that is always what is returned.
func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID {
	return SFlowStandard
}
+
// SFlowSampleType specifies the type of sample. Only flow samples
// and counter samples are supported.
type SFlowSampleType uint32

// Sample types defined by the sFlow v5 specification.
const (
	SFlowTypeFlowSample            SFlowSampleType = 1
	SFlowTypeCounterSample         SFlowSampleType = 2
	SFlowTypeExpandedFlowSample    SFlowSampleType = 3
	SFlowTypeExpandedCounterSample SFlowSampleType = 4
)

// GetType echoes back a supported sample type, and panics on any other
// value (a programmer error upstream of this call).
func (st SFlowSampleType) GetType() SFlowSampleType {
	if st >= SFlowTypeFlowSample && st <= SFlowTypeExpandedCounterSample {
		return st
	}
	panic("Invalid Sample Type")
}

// sflowSampleTypeNames maps each supported sample type to a display name.
var sflowSampleTypeNames = map[SFlowSampleType]string{
	SFlowTypeFlowSample:            "Flow Sample",
	SFlowTypeCounterSample:         "Counter Sample",
	SFlowTypeExpandedFlowSample:    "Expanded Flow Sample",
	SFlowTypeExpandedCounterSample: "Expanded Counter Sample",
}

// String renders a human-readable name for the sample type, or an empty
// string for unsupported values.
func (st SFlowSampleType) String() string {
	return sflowSampleTypeNames[st]
}
+
// LayerType returns gopacket.LayerTypeSFlow.
// NOTE(review): receiver names are inconsistent (s vs d) across these
// methods; gopacket convention is one name per type.
func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow }

// Payload returns nil: the sFlow datagram carries no trailing payload.
func (d *SFlowDatagram) Payload() []byte { return nil }

// CanDecode returns the layer class this decoder handles.
func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow }

// NextLayerType returns gopacket.LayerTypePayload.
func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
// SFlowIPType determines what form the IP address being decoded will
// take. This is an XDR union type allowing for both IPv4 and IPv6.
type SFlowIPType uint32

// Address-family selectors used in sFlow datagrams.
const (
	SFlowIPv4 SFlowIPType = 1
	SFlowIPv6 SFlowIPType = 2
)

// String renders a human-readable address family name, or an empty
// string for unknown families.
func (s SFlowIPType) String() string {
	if s == SFlowIPv4 {
		return "IPv4"
	}
	if s == SFlowIPv6 {
		return "IPv6"
	}
	return ""
}

// Length reports the encoded address size in bytes: 4 for IPv4, 16 for
// IPv6, and 0 for anything unrecognized.
func (s SFlowIPType) Length() int {
	switch s {
	case SFlowIPv4:
		return 4
	case SFlowIPv6:
		return 16
	}
	return 0
}
+
// DecodeFromBytes decodes a complete sFlow v5 datagram: the fixed header
// followed by SampleCount flow/counter samples.
//
// NOTE(review): there is no length validation; a truncated datagram will
// panic on slicing. Confirm callers (gopacket's decode path) tolerate that.
func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	var agentAddressType SFlowIPType

	// Each line consumes one field: the right-hand side is evaluated
	// against the old slice before data is advanced past it.
	data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4])
	data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4]))
	// An unknown address type has Length() == 0, leaving AgentAddress empty.
	data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()]
	data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4])
	data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4])
	data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4])
	data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4])

	if s.SampleCount < 1 {
		return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount)
	}
	for i := uint32(0); i < s.SampleCount; i++ {
		// Peek at the sample header to dispatch on the sample type; the
		// decode helpers advance data themselves.
		sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4]))
		_, sampleType := sdf.decode()
		switch sampleType {
		case SFlowTypeFlowSample:
			if flowSample, err := decodeFlowSample(&data, false); err == nil {
				s.FlowSamples = append(s.FlowSamples, flowSample)
			} else {
				return err
			}
		case SFlowTypeCounterSample:
			if counterSample, err := decodeCounterSample(&data, false); err == nil {
				s.CounterSamples = append(s.CounterSamples, counterSample)
			} else {
				return err
			}
		case SFlowTypeExpandedFlowSample:
			if flowSample, err := decodeFlowSample(&data, true); err == nil {
				s.FlowSamples = append(s.FlowSamples, flowSample)
			} else {
				return err
			}
		case SFlowTypeExpandedCounterSample:
			if counterSample, err := decodeCounterSample(&data, true); err == nil {
				s.CounterSamples = append(s.CounterSamples, counterSample)
			} else {
				return err
			}

		default:
			return fmt.Errorf("Unsupported SFlow sample type %d", sampleType)
		}
	}
	return nil
}
+
// SFlowFlowSample represents a sampled packet and contains
// one or more records describing the packet.
type SFlowFlowSample struct {
	EnterpriseID          SFlowEnterpriseID
	Format                SFlowSampleType
	SampleLength          uint32
	SequenceNumber        uint32
	SourceIDClass         SFlowSourceFormat
	SourceIDIndex         SFlowSourceValue
	SamplingRate          uint32
	SamplePool            uint32
	Dropped               uint32
	InputInterfaceFormat  uint32 // only populated for expanded samples
	InputInterface        uint32
	OutputInterfaceFormat uint32 // only populated for expanded samples
	OutputInterface       uint32
	RecordCount           uint32
	Records               []SFlowRecord
}
+
+// Flow samples have the following structure. Note
+// the bit fields to encode the Enterprise ID and the
+// Flow record format: type 1
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |id type | src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Flow samples have the following structure.
+// Flow record format: type 3
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id type |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
type SFlowFlowDataFormat uint32

// decode splits the packed word into the enterprise ID (top 20 bits) and
// the flow record type (bottom 12 bits).
func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) {
	leftField := fdf >> 12
	rightField := uint32(0xFFF) & uint32(fdf)
	return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField)
}
+
// GetRecords returns the records decoded for this flow sample.
func (fs SFlowFlowSample) GetRecords() []SFlowRecord {
	return fs.Records
}

// GetType reports the sample type. NOTE(review): expanded flow samples
// also report SFlowTypeFlowSample here.
func (fs SFlowFlowSample) GetType() SFlowSampleType {
	return SFlowTypeFlowSample
}
+
// skipRecord advances *data past one record: the 8-byte record header
// (format word + length word) plus the payload rounded up to the XDR
// 4-byte boundary.
func skipRecord(data *[]byte) {
	recordLength := int(binary.BigEndian.Uint32((*data)[4:]))
	// Round up with (n+3) &^ 3. The previous expression,
	// recordLength+((4-recordLength)%4), under-counted for lengths > 4
	// that are not multiples of 4, because Go's % keeps the sign of the
	// dividend and (4-recordLength)%4 goes negative.
	*data = (*data)[((recordLength+3)&^3)+8:]
}
+
// decodeFlowSample decodes one flow sample (compact form, or expanded form
// when expanded is true) from the front of *data, advancing *data past
// every field and record it consumes.
func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) {
	s := SFlowFlowSample{}
	var sdf SFlowDataFormat
	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
	var sdc SFlowDataSource

	s.EnterpriseID, s.Format = sdf.decode()
	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	// The expanded form carries the source class and index as two full
	// words; the compact form packs both into one word.
	if expanded {
		*data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4]))
		*data, s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4]))
	} else {
		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
	}
	*data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	*data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	*data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])

	if expanded {
		*data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
		*data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	} else {
		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	}
	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])

	for i := uint32(0); i < s.RecordCount; i++ {
		rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
		enterpriseID, flowRecordType := rdf.decode()

		// Only records with the standard enterprise ID (0) are decoded,
		// per the specification; vendor-specific records are skipped.
		// (Some exporters, e.g. pmacct, have been seen emitting
		// unexpected records here.)
		if enterpriseID == 0 {
			switch flowRecordType {
			case SFlowTypeRawPacketFlow:
				if record, err := decodeRawPacketFlowRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedUserFlow:
				if record, err := decodeExtendedUserFlow(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedUrlFlow:
				if record, err := decodeExtendedURLRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedSwitchFlow:
				if record, err := decodeExtendedSwitchFlowRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedRouterFlow:
				if record, err := decodeExtendedRouterFlowRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedGatewayFlow:
				if record, err := decodeExtendedGatewayFlowRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeEthernetFrameFlow:
				if record, err := decodeEthernetFrameFlowRecord(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeIpv4Flow:
				if record, err := decodeSFlowIpv4Record(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeIpv6Flow:
				if record, err := decodeSFlowIpv6Record(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			// The record types below are recognized but not yet decoded:
			// the record is skipped and an error returned to the caller.
			case SFlowTypeExtendedMlpsFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedMlpsFlow")
			case SFlowTypeExtendedNatFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedNatFlow")
			case SFlowTypeExtendedMlpsTunnelFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedMlpsTunnelFlow")
			case SFlowTypeExtendedMlpsVcFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedMlpsVcFlow")
			case SFlowTypeExtendedMlpsFecFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedMlpsFecFlow")
			case SFlowTypeExtendedMlpsLvpFecFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow")
			case SFlowTypeExtendedVlanFlow:
				// TODO
				skipRecord(data)
				return s, errors.New("skipping TypeExtendedVlanFlow")
			case SFlowTypeExtendedIpv4TunnelEgressFlow:
				if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedIpv4TunnelIngressFlow:
				if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedIpv6TunnelEgressFlow:
				if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedIpv6TunnelIngressFlow:
				if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedDecapsulateEgressFlow:
				if record, err := decodeExtendedDecapsulateEgress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedDecapsulateIngressFlow:
				if record, err := decodeExtendedDecapsulateIngress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedVniEgressFlow:
				if record, err := decodeExtendedVniEgress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			case SFlowTypeExtendedVniIngressFlow:
				if record, err := decodeExtendedVniIngress(data); err == nil {
					s.Records = append(s.Records, record)
				} else {
					return s, err
				}
			default:
				return s, fmt.Errorf("Unsupported flow record type: %d", flowRecordType)
			}
		} else {
			skipRecord(data)
		}
	}
	return s, nil
}
+
// SFlowCounterSample reports information about various counter
// objects. Typically these are items like IfInOctets, or
// CPU / Memory stats, etc. SFlow will report these at regular
// intervals as configured on the agent. If one were sufficiently
// industrious, this could be used to replace the typical
// SNMP polling used for such things.
type SFlowCounterSample struct {
	EnterpriseID   SFlowEnterpriseID
	Format         SFlowSampleType
	SampleLength   uint32
	SequenceNumber uint32
	SourceIDClass  SFlowSourceFormat
	SourceIDIndex  SFlowSourceValue
	RecordCount    uint32
	Records        []SFlowRecord
}
+
+// Counter samples have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |id type | src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
type SFlowCounterDataFormat uint32

// decode splits the packed word into the enterprise ID (top 20 bits) and
// the counter record type (bottom 12 bits).
func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) {
	leftField := cdf >> 12
	rightField := uint32(0xFFF) & uint32(cdf)
	return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField)
}
+
// GetRecords will return a slice of interface types
// representing records. A type switch can be used to
// get at the underlying SFlowCounterRecordType.
func (cs SFlowCounterSample) GetRecords() []SFlowRecord {
	return cs.Records
}

// GetType will report the type of sample. Only the
// compact form of counter samples is supported.
// NOTE(review): expanded counter samples also report
// SFlowTypeCounterSample here.
func (cs SFlowCounterSample) GetType() SFlowSampleType {
	return SFlowTypeCounterSample
}
+
// SFlowCounterRecordType identifies the kind of counter record carried
// inside a counter sample. The numeric values are the on-wire format codes.
type SFlowCounterRecordType uint32

const (
	SFlowTypeGenericInterfaceCounters   SFlowCounterRecordType = 1
	SFlowTypeEthernetInterfaceCounters  SFlowCounterRecordType = 2
	SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3
	SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4
	SFlowTypeVLANCounters               SFlowCounterRecordType = 5
	SFlowTypeLACPCounters               SFlowCounterRecordType = 7
	SFlowTypeProcessorCounters          SFlowCounterRecordType = 1001
	SFlowTypeOpenflowPortCounters       SFlowCounterRecordType = 1004
	SFlowTypePORTNAMECounters           SFlowCounterRecordType = 1005
	SFLowTypeAPPRESOURCESCounters       SFlowCounterRecordType = 2203
	SFlowTypeOVSDPCounters              SFlowCounterRecordType = 2207
)

// sflowCounterRecordTypeNames maps each known counter record type to its
// display name; unknown types render as "".
var sflowCounterRecordTypeNames = map[SFlowCounterRecordType]string{
	SFlowTypeGenericInterfaceCounters:   "Generic Interface Counters",
	SFlowTypeEthernetInterfaceCounters:  "Ethernet Interface Counters",
	SFlowTypeTokenRingInterfaceCounters: "Token Ring Interface Counters",
	SFlowType100BaseVGInterfaceCounters: "100BaseVG Interface Counters",
	SFlowTypeVLANCounters:               "VLAN Counters",
	SFlowTypeLACPCounters:               "LACP Counters",
	SFlowTypeProcessorCounters:          "Processor Counters",
	SFlowTypeOpenflowPortCounters:       "Openflow Port Counters",
	SFlowTypePORTNAMECounters:           "PORT NAME Counters",
	SFLowTypeAPPRESOURCESCounters:       "App Resources Counters",
	SFlowTypeOVSDPCounters:              "OVSDP Counters",
}

// String renders a human-readable name for the counter record type.
func (cr SFlowCounterRecordType) String() string {
	return sflowCounterRecordTypeNames[cr]
}
+
// decodeCounterSample decodes one counter sample (compact form, or expanded
// form when expanded is true) from the front of *data, advancing *data past
// every field and record it consumes.
func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) {
	s := SFlowCounterSample{}
	var sdc SFlowDataSource
	var sdce SFlowDataSourceExpanded
	var sdf SFlowDataFormat

	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
	s.EnterpriseID, s.Format = sdf.decode()
	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	// The expanded form carries the source class and index as two full
	// words; the compact form packs both into one word.
	if expanded {
		*data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))}
		s.SourceIDClass, s.SourceIDIndex = sdce.decode()
	} else {
		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
	}
	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])

	for i := uint32(0); i < s.RecordCount; i++ {
		// Peek at the record header to dispatch; the decode helpers
		// advance *data themselves.
		cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
		_, counterRecordType := cdf.decode()
		switch counterRecordType {
		case SFlowTypeGenericInterfaceCounters:
			if record, err := decodeGenericInterfaceCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeEthernetInterfaceCounters:
			if record, err := decodeEthernetCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeTokenRingInterfaceCounters:
			skipRecord(data)
			return s, errors.New("skipping TypeTokenRingInterfaceCounters")
		case SFlowType100BaseVGInterfaceCounters:
			skipRecord(data)
			return s, errors.New("skipping Type100BaseVGInterfaceCounters")
		case SFlowTypeVLANCounters:
			if record, err := decodeVLANCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeLACPCounters:
			if record, err := decodeLACPCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeProcessorCounters:
			if record, err := decodeProcessorCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeOpenflowPortCounters:
			if record, err := decodeOpenflowportCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypePORTNAMECounters:
			if record, err := decodePortnameCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFLowTypeAPPRESOURCESCounters:
			if record, err := decodeAppresourcesCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		case SFlowTypeOVSDPCounters:
			if record, err := decodeOVSDPCounters(data); err == nil {
				s.Records = append(s.Records, record)
			} else {
				return s, err
			}
		default:
			return s, fmt.Errorf("Invalid counter record type: %d", counterRecordType)
		}
	}
	return s, nil
}
+
+// SFlowBaseFlowRecord holds the fields common to all records
+// of type SFlowFlowRecordType
+type SFlowBaseFlowRecord struct {
+ EnterpriseID SFlowEnterpriseID // vendor that defined this record format (0 = standard sFlow)
+ Format SFlowFlowRecordType // record type discriminator within the enterprise
+ FlowDataLength uint32 // length in bytes of the record payload that follows
+}
+
+// GetType returns the record type discriminator, letting callers
+// identify the concrete record behind a flow-record interface value.
+func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType {
+ return bfr.Format
+}
+
+// SFlowFlowRecordType denotes what kind of Flow Record is
+// represented. See RFC 3176
+type SFlowFlowRecordType uint32
+
+// Flow record type discriminators: 1-4 are the core sample formats,
+// 1001 and above are the "extended" record formats.
+const (
+ SFlowTypeRawPacketFlow SFlowFlowRecordType = 1
+ SFlowTypeEthernetFrameFlow SFlowFlowRecordType = 2
+ SFlowTypeIpv4Flow SFlowFlowRecordType = 3
+ SFlowTypeIpv6Flow SFlowFlowRecordType = 4
+ SFlowTypeExtendedSwitchFlow SFlowFlowRecordType = 1001
+ SFlowTypeExtendedRouterFlow SFlowFlowRecordType = 1002
+ SFlowTypeExtendedGatewayFlow SFlowFlowRecordType = 1003
+ SFlowTypeExtendedUserFlow SFlowFlowRecordType = 1004
+ SFlowTypeExtendedUrlFlow SFlowFlowRecordType = 1005
+ SFlowTypeExtendedMlpsFlow SFlowFlowRecordType = 1006
+ SFlowTypeExtendedNatFlow SFlowFlowRecordType = 1007
+ SFlowTypeExtendedMlpsTunnelFlow SFlowFlowRecordType = 1008
+ SFlowTypeExtendedMlpsVcFlow SFlowFlowRecordType = 1009
+ SFlowTypeExtendedMlpsFecFlow SFlowFlowRecordType = 1010
+ SFlowTypeExtendedMlpsLvpFecFlow SFlowFlowRecordType = 1011
+ SFlowTypeExtendedVlanFlow SFlowFlowRecordType = 1012
+ SFlowTypeExtendedIpv4TunnelEgressFlow SFlowFlowRecordType = 1023
+ SFlowTypeExtendedIpv4TunnelIngressFlow SFlowFlowRecordType = 1024
+ SFlowTypeExtendedIpv6TunnelEgressFlow SFlowFlowRecordType = 1025
+ SFlowTypeExtendedIpv6TunnelIngressFlow SFlowFlowRecordType = 1026
+ SFlowTypeExtendedDecapsulateEgressFlow SFlowFlowRecordType = 1027
+ SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028
+ SFlowTypeExtendedVniEgressFlow SFlowFlowRecordType = 1029
+ SFlowTypeExtendedVniIngressFlow SFlowFlowRecordType = 1030
+)
+
+// String returns a human-readable name for the flow record type, or
+// the empty string for an unrecognized value.
+func (rt SFlowFlowRecordType) String() string {
+ switch rt {
+ case SFlowTypeRawPacketFlow:
+ return "Raw Packet Flow Record"
+ case SFlowTypeEthernetFrameFlow:
+ return "Ethernet Frame Flow Record"
+ case SFlowTypeIpv4Flow:
+ return "IPv4 Flow Record"
+ case SFlowTypeIpv6Flow:
+ return "IPv6 Flow Record"
+ case SFlowTypeExtendedSwitchFlow:
+ return "Extended Switch Flow Record"
+ case SFlowTypeExtendedRouterFlow:
+ return "Extended Router Flow Record"
+ case SFlowTypeExtendedGatewayFlow:
+ return "Extended Gateway Flow Record"
+ case SFlowTypeExtendedUserFlow:
+ return "Extended User Flow Record"
+ case SFlowTypeExtendedUrlFlow:
+ return "Extended URL Flow Record"
+ case SFlowTypeExtendedMlpsFlow:
+ return "Extended MPLS Flow Record"
+ case SFlowTypeExtendedNatFlow:
+ return "Extended NAT Flow Record"
+ case SFlowTypeExtendedMlpsTunnelFlow:
+ return "Extended MPLS Tunnel Flow Record"
+ case SFlowTypeExtendedMlpsVcFlow:
+ return "Extended MPLS VC Flow Record"
+ case SFlowTypeExtendedMlpsFecFlow:
+ return "Extended MPLS FEC Flow Record"
+ case SFlowTypeExtendedMlpsLvpFecFlow:
+ return "Extended MPLS LVP FEC Flow Record"
+ case SFlowTypeExtendedVlanFlow:
+ return "Extended VLAN Flow Record"
+ case SFlowTypeExtendedIpv4TunnelEgressFlow:
+ return "Extended IPv4 Tunnel Egress Record"
+ case SFlowTypeExtendedIpv4TunnelIngressFlow:
+ return "Extended IPv4 Tunnel Ingress Record"
+ case SFlowTypeExtendedIpv6TunnelEgressFlow:
+ return "Extended IPv6 Tunnel Egress Record"
+ case SFlowTypeExtendedIpv6TunnelIngressFlow:
+ return "Extended IPv6 Tunnel Ingress Record"
+ case SFlowTypeExtendedDecapsulateEgressFlow:
+ return "Extended Decapsulate Egress Record"
+ case SFlowTypeExtendedDecapsulateIngressFlow:
+ return "Extended Decapsulate Ingress Record"
+ case SFlowTypeExtendedVniEgressFlow:
+ // Fixed: this previously returned "Extended VNI Ingress Record",
+ // a copy-paste of the Ingress case below.
+ return "Extended VNI Egress Record"
+ case SFlowTypeExtendedVniIngressFlow:
+ return "Extended VNI Ingress Record"
+ default:
+ return ""
+ }
+}
+
+// SFlowRawPacketFlowRecords hold information about a sampled
+// packet grabbed as it transited the agent. This is
+// perhaps the most useful and interesting record type,
+// as it holds the headers of the sampled packet and
+// can be used to build up a complete picture of the
+// traffic patterns on a network.
+//
+// The raw packet header is sent back into gopacket for
+// decoding, and the resulting gopacket.Packet is stored
+// in the Header member
+type SFlowRawPacketFlowRecord struct {
+ SFlowBaseFlowRecord
+ HeaderProtocol SFlowRawHeaderProtocol // link-layer protocol declared for the sampled header
+ FrameLength uint32 // original length of the frame on the wire, in bytes
+ PayloadRemoved uint32 // number of bytes stripped from the packet before export
+ HeaderLength uint32 // number of header bytes actually carried in this record
+ Header gopacket.Packet // the sampled header bytes re-decoded as a packet
+}
+
+// Raw packet record types have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Header Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Frame Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Payload Removed |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Header Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// \ Header \
+// \ \
+// \ \
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowRawHeaderProtocol identifies the link-layer protocol of the
+// header bytes carried in a raw packet flow record.
+type SFlowRawHeaderProtocol uint32
+
+const (
+ SFlowProtoEthernet SFlowRawHeaderProtocol = 1
+ SFlowProtoISO88024 SFlowRawHeaderProtocol = 2
+ SFlowProtoISO88025 SFlowRawHeaderProtocol = 3
+ SFlowProtoFDDI SFlowRawHeaderProtocol = 4
+ SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5
+ SFlowProtoX25 SFlowRawHeaderProtocol = 6
+ SFlowProtoPPP SFlowRawHeaderProtocol = 7
+ SFlowProtoSMDS SFlowRawHeaderProtocol = 8
+ SFlowProtoAAL5 SFlowRawHeaderProtocol = 9
+ SFlowProtoAAL5_IP SFlowRawHeaderProtocol = 10 /* e.g. Cisco AAL5 mux */
+ SFlowProtoIPv4 SFlowRawHeaderProtocol = 11
+ SFlowProtoIPv6 SFlowRawHeaderProtocol = 12
+ SFlowProtoMPLS SFlowRawHeaderProtocol = 13
+ SFlowProtoPOS SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */
+)
+
+// String returns the conventional name of the raw-header protocol,
+// or "UNKNOWN" for any value outside the defined set.
+func (sfhp SFlowRawHeaderProtocol) String() string {
+ names := map[SFlowRawHeaderProtocol]string{
+ SFlowProtoEthernet: "ETHERNET-ISO88023",
+ SFlowProtoISO88024: "ISO88024-TOKENBUS",
+ SFlowProtoISO88025: "ISO88025-TOKENRING",
+ SFlowProtoFDDI: "FDDI",
+ SFlowProtoFrameRelay: "FRAME-RELAY",
+ SFlowProtoX25: "X25",
+ SFlowProtoPPP: "PPP",
+ SFlowProtoSMDS: "SMDS",
+ SFlowProtoAAL5: "AAL5",
+ SFlowProtoAAL5_IP: "AAL5-IP",
+ SFlowProtoIPv4: "IPv4",
+ SFlowProtoIPv6: "IPv6",
+ SFlowProtoMPLS: "MPLS",
+ SFlowProtoPOS: "POS",
+ }
+ if name, ok := names[sfhp]; ok {
+ return name
+ }
+ return "UNKNOWN"
+}
+
+// decodeRawPacketFlowRecord reads a raw packet flow record from the
+// front of *data, advancing the slice past the bytes consumed. The
+// captured header bytes are re-parsed via gopacket (always as an
+// Ethernet frame, regardless of the declared HeaderProtocol) and
+// stored in rec.Header.
+func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) {
+ rec := SFlowRawPacketFlowRecord{}
+ header := []byte{}
+ var fdf SFlowFlowDataFormat
+
+ // Each step reads the leading 4 bytes, then advances *data past them.
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4]))
+ *data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ // Header bytes are padded to a 4-byte boundary; the unsigned
+ // wraparound in (4 - len) % 4 still yields the correct pad when
+ // len > 4 because 2^32 is a multiple of 4.
+ headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4))
+ *data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding]
+ rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default)
+ return rec, nil
+}
+
+// SFlowExtendedSwitchFlowRecord give additional information
+// about the sampled packet if it's available. It's mainly
+// useful for getting at the incoming and outgoing VLANs
+// An agent may or may not provide this information.
+type SFlowExtendedSwitchFlowRecord struct {
+ SFlowBaseFlowRecord
+ IncomingVLAN uint32 // VLAN on which the frame arrived
+ IncomingVLANPriority uint32 // priority of the incoming frame
+ OutgoingVLAN uint32 // VLAN on which the frame left
+ OutgoingVLANPriority uint32 // priority of the outgoing frame
+}
+
+// Extended switch records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Incoming VLAN |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Incoming VLAN Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Outgoing VLAN |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Outgoing VLAN Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) {
+ es := SFlowExtendedSwitchFlowRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ es.EnterpriseID, es.Format = fdf.decode()
+ *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return es, nil
+}
+
+// SFlowExtendedRouterFlowRecord gives additional information
+// about the layer 3 routing information used to forward
+// the packet
+type SFlowExtendedRouterFlowRecord struct {
+ SFlowBaseFlowRecord
+ NextHop net.IP // next-hop router address (IPv4 or IPv6, per the on-wire type field)
+ NextHopSourceMask uint32 // mask associated with the source address
+ NextHopDestinationMask uint32 // mask associated with the destination address
+}
+
+// Extended router records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IP version of next hop router (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Next Hop address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Next Hop Source Mask |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Next Hop Destination Mask |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// decodeExtendedRouterFlowRecord reads an extended router flow record
+// from the front of *data, advancing the slice past the bytes
+// consumed. The next-hop length (4 or 16 bytes) is chosen by the IP
+// address type field that precedes it.
+func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) {
+ er := SFlowExtendedRouterFlowRecord{}
+ var fdf SFlowFlowDataFormat
+ var extendedRouterAddressType SFlowIPType
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ er.EnterpriseID, er.Format = fdf.decode()
+ *data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+ // NOTE: er.NextHop aliases the input buffer rather than copying it.
+ *data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()]
+ *data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return er, nil
+}
+
+// SFlowExtendedGatewayFlowRecord describes information treasured by
+// network engineers everywhere: AS path information listing which
+// BGP peer sent the packet, and various other BGP related info.
+// This information is vital because it gives a picture of how much
+// traffic is being sent from / received by various BGP peers.
+
+// Extended gateway records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IP version of next hop router (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Next Hop address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Peer AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | AS Path Count |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / AS Path / Sequence /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Communities /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Local Pref |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// AS Path / Sequence:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | AS Source Type (Path=1 / Sequence=2) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Path / Sequence length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Path / Sequence Members /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Communities:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | community length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / community Members /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowExtendedGatewayFlowRecord carries the BGP-related information
+// for a sampled packet described in the layout comments above: next
+// hop, AS numbers, AS path segments, communities and local pref.
+type SFlowExtendedGatewayFlowRecord struct {
+ SFlowBaseFlowRecord
+ NextHop net.IP // next-hop address (IPv4 or IPv6, per the on-wire type field)
+ AS uint32 // AS number of this gateway
+ SourceAS uint32 // AS number of the traffic source
+ PeerAS uint32 // AS number of the BGP peer that sent the packet
+ ASPathCount uint32 // number of AS path segments that follow
+ ASPath []SFlowASDestination // AS path segments (sets and/or sequences)
+ Communities []uint32 // BGP community values
+ LocalPref uint32 // BGP local preference
+}
+
+// SFlowASPathType distinguishes the two kinds of AS path segment:
+// an unordered AS set or an ordered AS sequence.
+type SFlowASPathType uint32
+
+const (
+ SFlowASSet SFlowASPathType = 1
+ SFlowASSequence SFlowASPathType = 2
+)
+
+// String returns a human-readable name for the AS path segment kind;
+// unknown values yield the empty string.
+func (apt SFlowASPathType) String() string {
+ if apt == SFlowASSet {
+ return "AS Set"
+ }
+ if apt == SFlowASSequence {
+ return "AS Sequence"
+ }
+ return ""
+}
+
+// SFlowASDestination is one AS path segment: its kind (set or
+// sequence), the member count, and the member AS numbers.
+type SFlowASDestination struct {
+ Type SFlowASPathType // segment kind: AS set or AS sequence
+ Count uint32 // number of entries in Members
+ Members []uint32 // member AS numbers
+}
+
+// String renders the AS destination as its segment kind followed by
+// the member AS numbers; unknown kinds yield the empty string.
+func (asd SFlowASDestination) String() string {
+ if asd.Type == SFlowASSet {
+ return fmt.Sprintf("AS Set:%v", asd.Members)
+ }
+ if asd.Type == SFlowASSequence {
+ return fmt.Sprintf("AS Sequence:%v", asd.Members)
+ }
+ return ""
+}
+
+func (ad *SFlowASDestination) decodePath(data *[]byte) {
+ *data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4]))
+ *data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ ad.Members = make([]uint32, ad.Count)
+ for i := uint32(0); i < ad.Count; i++ {
+ var member uint32
+ *data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ ad.Members[i] = member
+ }
+}
+
+// decodeExtendedGatewayFlowRecord reads an extended gateway (BGP)
+// flow record from the front of *data, advancing the slice past the
+// bytes consumed: next hop, AS numbers, AS path segments, the
+// communities list, and local preference.
+func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) {
+ eg := SFlowExtendedGatewayFlowRecord{}
+ var fdf SFlowFlowDataFormat
+ var extendedGatewayAddressType SFlowIPType
+ var communitiesLength uint32
+ var community uint32
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eg.EnterpriseID, eg.Format = fdf.decode()
+ *data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ // The next-hop length (4 or 16 bytes) depends on the address type
+ // just read; NextHop aliases the input buffer rather than copying.
+ *data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+ *data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()]
+ *data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ // Each AS path segment carries its own length; decodePath advances
+ // *data as it reads.
+ for i := uint32(0); i < eg.ASPathCount; i++ {
+ asPath := SFlowASDestination{}
+ asPath.decodePath(data)
+ eg.ASPath = append(eg.ASPath, asPath)
+ }
+ *data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ eg.Communities = make([]uint32, communitiesLength)
+ for j := uint32(0); j < communitiesLength; j++ {
+ *data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ eg.Communities[j] = community
+ }
+ *data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return eg, nil
+}
+
+// **************************************************
+// Extended URL Flow Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | direction |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | URL |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Host |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowURLDirection records which side of the sampled connection is
+// the server in an extended URL record.
+type SFlowURLDirection uint32
+
+const (
+ SFlowURLsrc SFlowURLDirection = 1
+ SFlowURLdst SFlowURLDirection = 2
+)
+
+// String describes which address is the server for this URL record;
+// unknown values yield the empty string.
+func (urld SFlowURLDirection) String() string {
+ if urld == SFlowURLsrc {
+ return "Source address is the server"
+ }
+ if urld == SFlowURLdst {
+ return "Destination address is the server"
+ }
+ return ""
+}
+
+// SFlowExtendedURLRecord carries the URL and host seen in the sampled
+// traffic, plus which side of the connection is the server.
+type SFlowExtendedURLRecord struct {
+ SFlowBaseFlowRecord
+ Direction SFlowURLDirection // which address is the server
+ URL string // the sampled URL
+ Host string // the server host (presumably the HTTP Host header — not confirmed here)
+}
+
+// decodeExtendedURLRecord reads an extended URL flow record from the
+// front of *data, advancing the slice past the bytes consumed. URL
+// and Host are length-prefixed strings padded to 4-byte boundaries;
+// the pad bytes are consumed but excluded from the stored strings.
+func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) {
+ eur := SFlowExtendedURLRecord{}
+ var fdf SFlowFlowDataFormat
+ var urlLen uint32
+ var urlLenWithPad int
+ var hostLen uint32
+ var hostLenWithPad int
+ var urlBytes []byte
+ var hostBytes []byte
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eur.EnterpriseID, eur.Format = fdf.decode()
+ *data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4]))
+ // (4 - len) % 4 relies on uint32 wraparound to round the length up
+ // to the next 4-byte boundary.
+ *data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ urlLenWithPad = int(urlLen + ((4 - urlLen) % 4))
+ *data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad]
+ eur.URL = string(urlBytes[:urlLen])
+ *data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ hostLenWithPad = int(hostLen + ((4 - hostLen) % 4))
+ *data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad]
+ eur.Host = string(hostBytes[:hostLen])
+ return eur, nil
+}
+
+// **************************************************
+// Extended User Flow Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Character Set |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source User Id |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination Character Set |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination User ID |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowExtendedUserFlow identifies the users associated with the
+// sampled traffic; each user id string carries its own charset tag.
+type SFlowExtendedUserFlow struct {
+ SFlowBaseFlowRecord
+ SourceCharSet SFlowCharSet // character set of SourceUserID
+ SourceUserID string // user id on the source side
+ DestinationCharSet SFlowCharSet // character set of DestinationUserID
+ DestinationUserID string // user id on the destination side
+}
+
+// SFlowCharSet identifies the character encoding of a user id in an
+// extended user flow record. The values appear to follow the IANA
+// character-set registry (MIBenum), e.g. 3 = US-ASCII, 106 = UTF-8 —
+// TODO confirm against the sFlow specification.
+type SFlowCharSet uint32
+
+const (
+ SFlowCSunknown SFlowCharSet = 2
+ SFlowCSASCII SFlowCharSet = 3
+ SFlowCSISOLatin1 SFlowCharSet = 4
+ SFlowCSISOLatin2 SFlowCharSet = 5
+ SFlowCSISOLatin3 SFlowCharSet = 6
+ SFlowCSISOLatin4 SFlowCharSet = 7
+ SFlowCSISOLatinCyrillic SFlowCharSet = 8
+ SFlowCSISOLatinArabic SFlowCharSet = 9
+ SFlowCSISOLatinGreek SFlowCharSet = 10
+ SFlowCSISOLatinHebrew SFlowCharSet = 11
+ SFlowCSISOLatin5 SFlowCharSet = 12
+ SFlowCSISOLatin6 SFlowCharSet = 13
+ SFlowCSISOTextComm SFlowCharSet = 14
+ SFlowCSHalfWidthKatakana SFlowCharSet = 15
+ SFlowCSJISEncoding SFlowCharSet = 16
+ SFlowCSShiftJIS SFlowCharSet = 17
+ SFlowCSEUCPkdFmtJapanese SFlowCharSet = 18
+ SFlowCSEUCFixWidJapanese SFlowCharSet = 19
+ SFlowCSISO4UnitedKingdom SFlowCharSet = 20
+ SFlowCSISO11SwedishForNames SFlowCharSet = 21
+ SFlowCSISO15Italian SFlowCharSet = 22
+ SFlowCSISO17Spanish SFlowCharSet = 23
+ SFlowCSISO21German SFlowCharSet = 24
+ SFlowCSISO60DanishNorwegian SFlowCharSet = 25
+ SFlowCSISO69French SFlowCharSet = 26
+ SFlowCSISO10646UTF1 SFlowCharSet = 27
+ SFlowCSISO646basic1983 SFlowCharSet = 28
+ SFlowCSINVARIANT SFlowCharSet = 29
+ SFlowCSISO2IntlRefVersion SFlowCharSet = 30
+ SFlowCSNATSSEFI SFlowCharSet = 31
+ SFlowCSNATSSEFIADD SFlowCharSet = 32
+ SFlowCSNATSDANO SFlowCharSet = 33
+ SFlowCSNATSDANOADD SFlowCharSet = 34
+ SFlowCSISO10Swedish SFlowCharSet = 35
+ SFlowCSKSC56011987 SFlowCharSet = 36
+ SFlowCSISO2022KR SFlowCharSet = 37
+ SFlowCSEUCKR SFlowCharSet = 38
+ SFlowCSISO2022JP SFlowCharSet = 39
+ SFlowCSISO2022JP2 SFlowCharSet = 40
+ SFlowCSISO13JISC6220jp SFlowCharSet = 41
+ SFlowCSISO14JISC6220ro SFlowCharSet = 42
+ SFlowCSISO16Portuguese SFlowCharSet = 43
+ SFlowCSISO18Greek7Old SFlowCharSet = 44
+ SFlowCSISO19LatinGreek SFlowCharSet = 45
+ SFlowCSISO25French SFlowCharSet = 46
+ SFlowCSISO27LatinGreek1 SFlowCharSet = 47
+ SFlowCSISO5427Cyrillic SFlowCharSet = 48
+ SFlowCSISO42JISC62261978 SFlowCharSet = 49
+ SFlowCSISO47BSViewdata SFlowCharSet = 50
+ SFlowCSISO49INIS SFlowCharSet = 51
+ SFlowCSISO50INIS8 SFlowCharSet = 52
+ SFlowCSISO51INISCyrillic SFlowCharSet = 53
+ SFlowCSISO54271981 SFlowCharSet = 54
+ SFlowCSISO5428Greek SFlowCharSet = 55
+ SFlowCSISO57GB1988 SFlowCharSet = 56
+ SFlowCSISO58GB231280 SFlowCharSet = 57
+ SFlowCSISO61Norwegian2 SFlowCharSet = 58
+ SFlowCSISO70VideotexSupp1 SFlowCharSet = 59
+ SFlowCSISO84Portuguese2 SFlowCharSet = 60
+ SFlowCSISO85Spanish2 SFlowCharSet = 61
+ SFlowCSISO86Hungarian SFlowCharSet = 62
+ SFlowCSISO87JISX0208 SFlowCharSet = 63
+ SFlowCSISO88Greek7 SFlowCharSet = 64
+ SFlowCSISO89ASMO449 SFlowCharSet = 65
+ SFlowCSISO90 SFlowCharSet = 66
+ SFlowCSISO91JISC62291984a SFlowCharSet = 67
+ SFlowCSISO92JISC62991984b SFlowCharSet = 68
+ SFlowCSISO93JIS62291984badd SFlowCharSet = 69
+ SFlowCSISO94JIS62291984hand SFlowCharSet = 70
+ SFlowCSISO95JIS62291984handadd SFlowCharSet = 71
+ SFlowCSISO96JISC62291984kana SFlowCharSet = 72
+ SFlowCSISO2033 SFlowCharSet = 73
+ SFlowCSISO99NAPLPS SFlowCharSet = 74
+ SFlowCSISO102T617bit SFlowCharSet = 75
+ SFlowCSISO103T618bit SFlowCharSet = 76
+ SFlowCSISO111ECMACyrillic SFlowCharSet = 77
+ SFlowCSa71 SFlowCharSet = 78
+ SFlowCSa72 SFlowCharSet = 79
+ SFlowCSISO123CSAZ24341985gr SFlowCharSet = 80
+ SFlowCSISO88596E SFlowCharSet = 81
+ SFlowCSISO88596I SFlowCharSet = 82
+ SFlowCSISO128T101G2 SFlowCharSet = 83
+ SFlowCSISO88598E SFlowCharSet = 84
+ SFlowCSISO88598I SFlowCharSet = 85
+ SFlowCSISO139CSN369103 SFlowCharSet = 86
+ SFlowCSISO141JUSIB1002 SFlowCharSet = 87
+ SFlowCSISO143IECP271 SFlowCharSet = 88
+ SFlowCSISO146Serbian SFlowCharSet = 89
+ SFlowCSISO147Macedonian SFlowCharSet = 90
+ SFlowCSISO150 SFlowCharSet = 91
+ SFlowCSISO151Cuba SFlowCharSet = 92
+ SFlowCSISO6937Add SFlowCharSet = 93
+ SFlowCSISO153GOST1976874 SFlowCharSet = 94
+ SFlowCSISO8859Supp SFlowCharSet = 95
+ SFlowCSISO10367Box SFlowCharSet = 96
+ SFlowCSISO158Lap SFlowCharSet = 97
+ SFlowCSISO159JISX02121990 SFlowCharSet = 98
+ SFlowCSISO646Danish SFlowCharSet = 99
+ SFlowCSUSDK SFlowCharSet = 100
+ SFlowCSDKUS SFlowCharSet = 101
+ SFlowCSKSC5636 SFlowCharSet = 102
+ SFlowCSUnicode11UTF7 SFlowCharSet = 103
+ SFlowCSISO2022CN SFlowCharSet = 104
+ SFlowCSISO2022CNEXT SFlowCharSet = 105
+ SFlowCSUTF8 SFlowCharSet = 106
+ SFlowCSISO885913 SFlowCharSet = 109
+ SFlowCSISO885914 SFlowCharSet = 110
+ SFlowCSISO885915 SFlowCharSet = 111
+ SFlowCSISO885916 SFlowCharSet = 112
+ SFlowCSGBK SFlowCharSet = 113
+ SFlowCSGB18030 SFlowCharSet = 114
+ SFlowCSOSDEBCDICDF0415 SFlowCharSet = 115
+ SFlowCSOSDEBCDICDF03IRV SFlowCharSet = 116
+ SFlowCSOSDEBCDICDF041 SFlowCharSet = 117
+ SFlowCSISO115481 SFlowCharSet = 118
+ SFlowCSKZ1048 SFlowCharSet = 119
+ SFlowCSUnicode SFlowCharSet = 1000
+ SFlowCSUCS4 SFlowCharSet = 1001
+ SFlowCSUnicodeASCII SFlowCharSet = 1002
+ SFlowCSUnicodeLatin1 SFlowCharSet = 1003
+ SFlowCSUnicodeJapanese SFlowCharSet = 1004
+ SFlowCSUnicodeIBM1261 SFlowCharSet = 1005
+ SFlowCSUnicodeIBM1268 SFlowCharSet = 1006
+ SFlowCSUnicodeIBM1276 SFlowCharSet = 1007
+ SFlowCSUnicodeIBM1264 SFlowCharSet = 1008
+ SFlowCSUnicodeIBM1265 SFlowCharSet = 1009
+ SFlowCSUnicode11 SFlowCharSet = 1010
+ SFlowCSSCSU SFlowCharSet = 1011
+ SFlowCSUTF7 SFlowCharSet = 1012
+ SFlowCSUTF16BE SFlowCharSet = 1013
+ SFlowCSUTF16LE SFlowCharSet = 1014
+ SFlowCSUTF16 SFlowCharSet = 1015
+ SFlowCSCESU8 SFlowCharSet = 1016
+ SFlowCSUTF32 SFlowCharSet = 1017
+ SFlowCSUTF32BE SFlowCharSet = 1018
+ SFlowCSUTF32LE SFlowCharSet = 1019
+ SFlowCSBOCU1 SFlowCharSet = 1020
+ SFlowCSWindows30Latin1 SFlowCharSet = 2000
+ SFlowCSWindows31Latin1 SFlowCharSet = 2001
+ SFlowCSWindows31Latin2 SFlowCharSet = 2002
+ SFlowCSWindows31Latin5 SFlowCharSet = 2003
+ SFlowCSHPRoman8 SFlowCharSet = 2004
+ SFlowCSAdobeStandardEncoding SFlowCharSet = 2005
+ SFlowCSVenturaUS SFlowCharSet = 2006
+ SFlowCSVenturaInternational SFlowCharSet = 2007
+ SFlowCSDECMCS SFlowCharSet = 2008
+ SFlowCSPC850Multilingual SFlowCharSet = 2009
+ SFlowCSPCp852 SFlowCharSet = 2010
+ SFlowCSPC8CodePage437 SFlowCharSet = 2011
+ SFlowCSPC8DanishNorwegian SFlowCharSet = 2012
+ SFlowCSPC862LatinHebrew SFlowCharSet = 2013
+ SFlowCSPC8Turkish SFlowCharSet = 2014
+ SFlowCSIBMSymbols SFlowCharSet = 2015
+ SFlowCSIBMThai SFlowCharSet = 2016
+ SFlowCSHPLegal SFlowCharSet = 2017
+ SFlowCSHPPiFont SFlowCharSet = 2018
+ SFlowCSHPMath8 SFlowCharSet = 2019
+ SFlowCSHPPSMath SFlowCharSet = 2020
+ SFlowCSHPDesktop SFlowCharSet = 2021
+ SFlowCSVenturaMath SFlowCharSet = 2022
+ SFlowCSMicrosoftPublishing SFlowCharSet = 2023
+ SFlowCSWindows31J SFlowCharSet = 2024
+ SFlowCSGB2312 SFlowCharSet = 2025
+ SFlowCSBig5 SFlowCharSet = 2026
+ SFlowCSMacintosh SFlowCharSet = 2027
+ SFlowCSIBM037 SFlowCharSet = 2028
+ SFlowCSIBM038 SFlowCharSet = 2029
+ SFlowCSIBM273 SFlowCharSet = 2030
+ SFlowCSIBM274 SFlowCharSet = 2031
+ SFlowCSIBM275 SFlowCharSet = 2032
+ SFlowCSIBM277 SFlowCharSet = 2033
+ SFlowCSIBM278 SFlowCharSet = 2034
+ SFlowCSIBM280 SFlowCharSet = 2035
+ SFlowCSIBM281 SFlowCharSet = 2036
+ SFlowCSIBM284 SFlowCharSet = 2037
+ SFlowCSIBM285 SFlowCharSet = 2038
+ SFlowCSIBM290 SFlowCharSet = 2039
+ SFlowCSIBM297 SFlowCharSet = 2040
+ SFlowCSIBM420 SFlowCharSet = 2041
+ SFlowCSIBM423 SFlowCharSet = 2042
+ SFlowCSIBM424 SFlowCharSet = 2043
+ SFlowCSIBM500 SFlowCharSet = 2044
+ SFlowCSIBM851 SFlowCharSet = 2045
+ SFlowCSIBM855 SFlowCharSet = 2046
+ SFlowCSIBM857 SFlowCharSet = 2047
+ SFlowCSIBM860 SFlowCharSet = 2048
+ SFlowCSIBM861 SFlowCharSet = 2049
+ SFlowCSIBM863 SFlowCharSet = 2050
+ SFlowCSIBM864 SFlowCharSet = 2051
+ SFlowCSIBM865 SFlowCharSet = 2052
+ SFlowCSIBM868 SFlowCharSet = 2053
+ SFlowCSIBM869 SFlowCharSet = 2054
+ SFlowCSIBM870 SFlowCharSet = 2055
+ SFlowCSIBM871 SFlowCharSet = 2056
+ SFlowCSIBM880 SFlowCharSet = 2057
+ SFlowCSIBM891 SFlowCharSet = 2058
+ SFlowCSIBM903 SFlowCharSet = 2059
+ SFlowCSIBBM904 SFlowCharSet = 2060
+ SFlowCSIBM905 SFlowCharSet = 2061
+ SFlowCSIBM918 SFlowCharSet = 2062
+ SFlowCSIBM1026 SFlowCharSet = 2063
+ SFlowCSIBMEBCDICATDE SFlowCharSet = 2064
+ SFlowCSEBCDICATDEA SFlowCharSet = 2065
+ SFlowCSEBCDICCAFR SFlowCharSet = 2066
+ SFlowCSEBCDICDKNO SFlowCharSet = 2067
+ SFlowCSEBCDICDKNOA SFlowCharSet = 2068
+ SFlowCSEBCDICFISE SFlowCharSet = 2069
+ SFlowCSEBCDICFISEA SFlowCharSet = 2070
+ SFlowCSEBCDICFR SFlowCharSet = 2071
+ SFlowCSEBCDICIT SFlowCharSet = 2072
+ SFlowCSEBCDICPT SFlowCharSet = 2073
+ SFlowCSEBCDICES SFlowCharSet = 2074
+ SFlowCSEBCDICESA SFlowCharSet = 2075
+ SFlowCSEBCDICESS SFlowCharSet = 2076
+ SFlowCSEBCDICUK SFlowCharSet = 2077
+ SFlowCSEBCDICUS SFlowCharSet = 2078
+ SFlowCSUnknown8BiT SFlowCharSet = 2079
+ SFlowCSMnemonic SFlowCharSet = 2080
+ SFlowCSMnem SFlowCharSet = 2081
+ SFlowCSVISCII SFlowCharSet = 2082
+ SFlowCSVIQR SFlowCharSet = 2083
+ SFlowCSKOI8R SFlowCharSet = 2084
+ SFlowCSHZGB2312 SFlowCharSet = 2085
+ SFlowCSIBM866 SFlowCharSet = 2086
+ SFlowCSPC775Baltic SFlowCharSet = 2087
+ SFlowCSKOI8U SFlowCharSet = 2088
+ SFlowCSIBM00858 SFlowCharSet = 2089
+ SFlowCSIBM00924 SFlowCharSet = 2090
+ SFlowCSIBM01140 SFlowCharSet = 2091
+ SFlowCSIBM01141 SFlowCharSet = 2092
+ SFlowCSIBM01142 SFlowCharSet = 2093
+ SFlowCSIBM01143 SFlowCharSet = 2094
+ SFlowCSIBM01144 SFlowCharSet = 2095
+ SFlowCSIBM01145 SFlowCharSet = 2096
+ SFlowCSIBM01146 SFlowCharSet = 2097
+ SFlowCSIBM01147 SFlowCharSet = 2098
+ SFlowCSIBM01148 SFlowCharSet = 2099
+ SFlowCSIBM01149 SFlowCharSet = 2100
+ SFlowCSBig5HKSCS SFlowCharSet = 2101
+ SFlowCSIBM1047 SFlowCharSet = 2102
+ SFlowCSPTCP154 SFlowCharSet = 2103
+ SFlowCSAmiga1251 SFlowCharSet = 2104
+ SFlowCSKOI7switched SFlowCharSet = 2105
+ SFlowCSBRF SFlowCharSet = 2106
+ SFlowCSTSCII SFlowCharSet = 2107
+ SFlowCSCP51932 SFlowCharSet = 2108
+ SFlowCSWindows874 SFlowCharSet = 2109
+ SFlowCSWindows1250 SFlowCharSet = 2250
+ SFlowCSWindows1251 SFlowCharSet = 2251
+ SFlowCSWindows1252 SFlowCharSet = 2252
+ SFlowCSWindows1253 SFlowCharSet = 2253
+ SFlowCSWindows1254 SFlowCharSet = 2254
+ SFlowCSWindows1255 SFlowCharSet = 2255
+ SFlowCSWindows1256 SFlowCharSet = 2256
+ SFlowCSWindows1257 SFlowCharSet = 2257
+ SFlowCSWindows1258 SFlowCharSet = 2258
+ SFlowCSTIS620 SFlowCharSet = 2259
+ SFlowCS50220 SFlowCharSet = 2260
+ SFlowCSreserved SFlowCharSet = 3000
+)
+
+// decodeExtendedUserFlow reads an extended user flow record from the
+// front of *data, advancing the slice past the bytes consumed. Both
+// user ids are length-prefixed strings padded to 4-byte boundaries;
+// the pad bytes are consumed but excluded from the stored strings.
+func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) {
+ eu := SFlowExtendedUserFlow{}
+ var fdf SFlowFlowDataFormat
+ var srcUserLen uint32
+ var srcUserLenWithPad int
+ var srcUserBytes []byte
+ var dstUserLen uint32
+ var dstUserLenWithPad int
+ var dstUserBytes []byte
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eu.EnterpriseID, eu.Format = fdf.decode()
+ *data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+ // (4 - len) % 4 relies on uint32 wraparound to round the length up
+ // to the next 4-byte boundary.
+ *data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4))
+ *data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad]
+ eu.SourceUserID = string(srcUserBytes[:srcUserLen])
+ *data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+ *data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4))
+ *data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad]
+ eu.DestinationUserID = string(dstUserBytes[:dstUserLen])
+ return eu, nil
+}
+
+// **************************************************
+// Packet IP version 4 Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TCP Flags |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TOS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// SFlowIpv4Record summarizes the layer 3/4 headers of a sampled IPv4
+// packet (see the record layout above).
+type SFlowIpv4Record struct {
+ // The length of the IP packet excluding lower layer encapsulations
+ Length uint32
+ // IP Protocol type (for example, TCP = 6, UDP = 17)
+ Protocol uint32
+ // Source IP Address
+ IPSrc net.IP
+ // Destination IP Address
+ IPDst net.IP
+ // TCP/UDP source port number or equivalent
+ PortSrc uint32
+ // TCP/UDP destination port number or equivalent
+ PortDst uint32
+ // TCP flags
+ TCPFlags uint32
+ // IP type of service
+ TOS uint32
+}
+
+// decodeSFlowIpv4Record reads a packed IPv4 flow record (eight 4-byte
+// fields) from the front of *data, advancing the slice past the bytes
+// consumed.
+func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) {
+ si := SFlowIpv4Record{}
+
+ *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ // NOTE: IPSrc and IPDst alias the input buffer rather than copying.
+ *data, si.IPSrc = (*data)[4:], net.IP((*data)[:4])
+ *data, si.IPDst = (*data)[4:], net.IP((*data)[:4])
+ *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return si, nil
+}
+
+// **************************************************
+// Packet IP version 6 Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destionation Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TCP Flags |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv6Record struct {
+ // The length of the IP packet excluding ower layer encapsulations
+ Length uint32
+ // IP Protocol type (for example, TCP = 6, UDP = 17)
+ Protocol uint32
+ // Source IP Address
+ IPSrc net.IP
+ // Destination IP Address
+ IPDst net.IP
+ // TCP/UDP source port number or equivalent
+ PortSrc uint32
+ // TCP/UDP destination port number or equivalent
+ PortDst uint32
+ // TCP flags
+ TCPFlags uint32
+ // IP priority
+ Priority uint32
+}
+
+func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) {
+ si := SFlowIpv6Record{}
+
+ *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.IPSrc = (*data)[16:], net.IP((*data)[:16])
+ *data, si.IPDst = (*data)[16:], net.IP((*data)[:16])
+ *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return si, nil
+}
+
+// **************************************************
+// Extended IPv4 Tunnel Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 4 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelEgressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) {
+ rec := SFlowExtendedIpv4TunnelEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv4 Tunnel Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 4 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelIngressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) {
+ rec := SFlowExtendedIpv4TunnelIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv6 Tunnel Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 6 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelEgressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) {
+ rec := SFlowExtendedIpv6TunnelEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv6 Tunnel Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 6 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelIngressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) {
+ rec := SFlowExtendedIpv6TunnelIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended Decapsulate Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Inner Header Offset |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateEgressRecord struct {
+ SFlowBaseFlowRecord
+ InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) {
+ rec := SFlowExtendedDecapsulateEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended Decapsulate Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Inner Header Offset |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateIngressRecord struct {
+ SFlowBaseFlowRecord
+ InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) {
+ rec := SFlowExtendedDecapsulateIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended VNI Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | VNI |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniEgressRecord struct {
+ SFlowBaseFlowRecord
+ VNI uint32
+}
+
+func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) {
+ rec := SFlowExtendedVniEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended VNI Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | VNI |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniIngressRecord struct {
+ SFlowBaseFlowRecord
+ VNI uint32
+}
+
+func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) {
+ rec := SFlowExtendedVniIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter data /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowBaseCounterRecord struct {
+ EnterpriseID SFlowEnterpriseID
+ Format SFlowCounterRecordType
+ FlowDataLength uint32
+}
+
+func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType {
+ switch bcr.Format {
+ case SFlowTypeGenericInterfaceCounters:
+ return SFlowTypeGenericInterfaceCounters
+ case SFlowTypeEthernetInterfaceCounters:
+ return SFlowTypeEthernetInterfaceCounters
+ case SFlowTypeTokenRingInterfaceCounters:
+ return SFlowTypeTokenRingInterfaceCounters
+ case SFlowType100BaseVGInterfaceCounters:
+ return SFlowType100BaseVGInterfaceCounters
+ case SFlowTypeVLANCounters:
+ return SFlowTypeVLANCounters
+ case SFlowTypeLACPCounters:
+ return SFlowTypeLACPCounters
+ case SFlowTypeProcessorCounters:
+ return SFlowTypeProcessorCounters
+ case SFlowTypeOpenflowPortCounters:
+ return SFlowTypeOpenflowPortCounters
+ case SFlowTypePORTNAMECounters:
+ return SFlowTypePORTNAMECounters
+ case SFLowTypeAPPRESOURCESCounters:
+ return SFLowTypeAPPRESOURCESCounters
+ case SFlowTypeOVSDPCounters:
+ return SFlowTypeOVSDPCounters
+ }
+ unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format)
+ panic(unrecognized)
+}
+
+// **************************************************
+// Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfType |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfSpeed |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfDirection |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfStatus |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IFInOctets |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInUcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInMulticastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInBroadcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInDiscards |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | InInErrors |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInUnknownProtos |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutOctets |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutUcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutMulticastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutBroadcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutDiscards |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOUtErrors |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfPromiscouousMode |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowGenericInterfaceCounters struct {
+ SFlowBaseCounterRecord
+ IfIndex uint32
+ IfType uint32
+ IfSpeed uint64
+ IfDirection uint32
+ IfStatus uint32
+ IfInOctets uint64
+ IfInUcastPkts uint32
+ IfInMulticastPkts uint32
+ IfInBroadcastPkts uint32
+ IfInDiscards uint32
+ IfInErrors uint32
+ IfInUnknownProtos uint32
+ IfOutOctets uint64
+ IfOutUcastPkts uint32
+ IfOutMulticastPkts uint32
+ IfOutBroadcastPkts uint32
+ IfOutDiscards uint32
+ IfOutErrors uint32
+ IfPromiscuousMode uint32
+}
+
+func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) {
+ gic := SFlowGenericInterfaceCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ gic.EnterpriseID, gic.Format = cdf.decode()
+ *data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return gic, nil
+}
+
+// **************************************************
+// Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter data /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowEthernetCounters struct {
+ SFlowBaseCounterRecord
+ AlignmentErrors uint32
+ FCSErrors uint32
+ SingleCollisionFrames uint32
+ MultipleCollisionFrames uint32
+ SQETestErrors uint32
+ DeferredTransmissions uint32
+ LateCollisions uint32
+ ExcessiveCollisions uint32
+ InternalMacTransmitErrors uint32
+ CarrierSenseErrors uint32
+ FrameTooLongs uint32
+ InternalMacReceiveErrors uint32
+ SymbolErrors uint32
+}
+
+func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) {
+ ec := SFlowEthernetCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ ec.EnterpriseID, ec.Format = cdf.decode()
+ *data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SingleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.ExcessiveCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return ec, nil
+}
+
+// VLAN Counter
+
+type SFlowVLANCounters struct {
+ SFlowBaseCounterRecord
+ VlanID uint32
+ Octets uint64
+ UcastPkts uint32
+ MulticastPkts uint32
+ BroadcastPkts uint32
+ Discards uint32
+}
+
+func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) {
+ vc := SFlowVLANCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ vc.EnterpriseID, vc.Format = cdf.decode()
+ vc.EnterpriseID, vc.Format = cdf.decode()
+ *data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return vc, nil
+}
+
// SFLLACPPortState is the 32-bit LACP port-state word; actor/partner
// state bits are carried packed in the single PortStateAll field.
type SFLLACPPortState struct {
	PortStateAll uint32
}
+
// SFlowLACPCounters is the LACP counter sample (64 bytes on the wire):
// actor/partner system MAC addresses (each 6 bytes plus 2 bytes of
// padding), attached aggregation ID, packed port state, and the
// RX/TX LACPDU and marker PDU counters.
type SFlowLACPCounters struct {
	SFlowBaseCounterRecord
	ActorSystemID        net.HardwareAddr
	PartnerSystemID      net.HardwareAddr
	AttachedAggID        uint32
	LacpPortState        SFLLACPPortState
	LACPDUsRx            uint32
	MarkerPDUsRx         uint32
	MarkerResponsePDUsRx uint32
	UnknownRx            uint32
	IllegalRx            uint32
	LACPDUsTx            uint32
	MarkerPDUsTx         uint32
	MarkerResponsePDUsTx uint32
}
+
+func decodeLACPCounters(data *[]byte) (SFlowLACPCounters, error) {
+ la := SFlowLACPCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ la.EnterpriseID, la.Format = cdf.decode()
+ *data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.ActorSystemID = (*data)[6:], (*data)[:6]
+ *data = (*data)[2:] // remove padding
+ *data, la.PartnerSystemID = (*data)[6:], (*data)[:6]
+ *data = (*data)[2:] //remove padding
+ *data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return la, nil
+
+}
+
+// **************************************************
+// Processor Counter Record
+// **************************************************
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | FiveSecCpu |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | OneMinCpu |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | GiveMinCpu |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TotalMemory |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | FreeMemory |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowProcessorCounters struct {
+ SFlowBaseCounterRecord
+ FiveSecCpu uint32 // 5 second average CPU utilization
+ OneMinCpu uint32 // 1 minute average CPU utilization
+ FiveMinCpu uint32 // 5 minute average CPU utilization
+ TotalMemory uint64 // total memory (in bytes)
+ FreeMemory uint64 // free memory (in bytes)
+}
+
+func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) {
+ pc := SFlowProcessorCounters{}
+ var cdf SFlowCounterDataFormat
+ var high32, low32 uint32
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ pc.EnterpriseID, pc.Format = cdf.decode()
+ *data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ *data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ pc.TotalMemory = (uint64(high32) << 32) + uint64(low32)
+ *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ pc.FreeMemory = (uint64(high32)) + uint64(low32)
+
+ return pc, nil
+}
+
// SFlowEthernetFrameFlowRecord gives additional information
// about the sampled packet if it's available: the frame length,
// source and destination MAC addresses, and the Ethernet packet type.
// An agent may or may not provide this record.
type SFlowEthernetFrameFlowRecord struct {
	SFlowBaseFlowRecord
	FrameLength uint32
	SrcMac      net.HardwareAddr
	DstMac      net.HardwareAddr
	Type        uint32
}
+
+// Ethernet frame flow records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Interprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Mac Address |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination Mac Address |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Ethernet Packet Type |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) {
+ es := SFlowEthernetFrameFlowRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ es.EnterpriseID, es.Format = fdf.decode()
+ *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ *data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+ *data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+ *data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return es, nil
+}
+
+//SFlowOpenflowPortCounters : OVS-Sflow OpenFlow Port Counter ( 20 Bytes )
+type SFlowOpenflowPortCounters struct {
+ SFlowBaseCounterRecord
+ DatapathID uint64
+ PortNo uint32
+}
+
+func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) {
+ ofp := SFlowOpenflowPortCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ ofp.EnterpriseID, ofp.Format = cdf.decode()
+ *data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return ofp, nil
+}
+
+//SFlowAppresourcesCounters : OVS_Sflow App Resources Counter ( 48 Bytes )
+type SFlowAppresourcesCounters struct {
+ SFlowBaseCounterRecord
+ UserTime uint32
+ SystemTime uint32
+ MemUsed uint64
+ MemMax uint64
+ FdOpen uint32
+ FdMax uint32
+ ConnOpen uint32
+ ConnMax uint32
+}
+
+func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) {
+ app := SFlowAppresourcesCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ app.EnterpriseID, app.Format = cdf.decode()
+ *data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return app, nil
+}
+
+//SFlowOVSDPCounters : OVS-Sflow DataPath Counter ( 32 Bytes )
+type SFlowOVSDPCounters struct {
+ SFlowBaseCounterRecord
+ NHit uint32
+ NMissed uint32
+ NLost uint32
+ NMaskHit uint32
+ NFlows uint32
+ NMasks uint32
+}
+
+func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) {
+ dp := SFlowOVSDPCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ dp.EnterpriseID, dp.Format = cdf.decode()
+ *data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return dp, nil
+}
+
// SFlowPORTNAME is the OVS sFlow port-name counter record (20 bytes):
// an XDR opaque string holding the port name. Len is the consumed
// (4-byte-padded) length as returned by decodeString, not necessarily
// the raw string length.
type SFlowPORTNAME struct {
	SFlowBaseCounterRecord
	Len uint32
	Str string
}
+
// decodeString pops an XDR opaque string off the front of *data: a
// 4-byte big-endian byte count, the string bytes, then zero padding up
// to a 4-byte boundary. It returns the decoded string and the number of
// payload bytes consumed — i.e. the byte count rounded UP to a multiple
// of 4, NOT the raw string length (callers such as
// decodePortnameCounters store this padded value).
func decodeString(data *[]byte) (length uint32, str string) {
	// Renamed from "len", which shadowed the builtin len function.
	*data, length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
	str = string((*data)[:length])
	// Round up to the XDR 4-byte alignment before advancing past padding.
	if length%4 != 0 {
		length += 4 - length%4
	}
	*data = (*data)[length:]
	return
}
+
+func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) {
+ pn := SFlowPORTNAME{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ pn.EnterpriseID, pn.Format = cdf.decode()
+ *data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ pn.Len, pn.Str = decodeString(data)
+
+ return pn, nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go
new file mode 100644
index 0000000..5403688
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sip.go
@@ -0,0 +1,546 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
// SIPVersion defines the different versions of the SIP Protocol.
type SIPVersion uint8

// Represents all the versions of SIP protocol understood by this decoder.
const (
	SIPVersion1 SIPVersion = 1 // SIP/1.0
	SIPVersion2 SIPVersion = 2 // SIP/2.0
)
+
+func (sv SIPVersion) String() string {
+ switch sv {
+ default:
+ // Defaulting to SIP/2.0
+ return "SIP/2.0"
+ case SIPVersion1:
+ return "SIP/1.0"
+ case SIPVersion2:
+ return "SIP/2.0"
+ }
+}
+
+// GetSIPVersion is used to get SIP version constant
+func GetSIPVersion(version string) (SIPVersion, error) {
+ switch strings.ToUpper(version) {
+ case "SIP/1.0":
+ return SIPVersion1, nil
+ case "SIP/2.0":
+ return SIPVersion2, nil
+ default:
+ return 0, fmt.Errorf("Unknown SIP version: '%s'", version)
+
+ }
+}
+
// SIPMethod defines the different methods of the SIP Protocol
// defined in the different RFC's.
type SIPMethod uint16

// Here are all the SIP methods. Values are contiguous from 1 to 15.
const (
	SIPMethodInvite    SIPMethod = 1  // INVITE [RFC3261]
	SIPMethodAck       SIPMethod = 2  // ACK [RFC3261]
	SIPMethodBye       SIPMethod = 3  // BYE [RFC3261]
	SIPMethodCancel    SIPMethod = 4  // CANCEL [RFC3261]
	SIPMethodOptions   SIPMethod = 5  // OPTIONS [RFC3261]
	SIPMethodRegister  SIPMethod = 6  // REGISTER [RFC3261]
	SIPMethodPrack     SIPMethod = 7  // PRACK [RFC3262]
	SIPMethodSubscribe SIPMethod = 8  // SUBSCRIBE [RFC6665]
	SIPMethodNotify    SIPMethod = 9  // NOTIFY [RFC6665]
	SIPMethodPublish   SIPMethod = 10 // PUBLISH [RFC3903]
	SIPMethodInfo      SIPMethod = 11 // INFO [RFC6086]
	SIPMethodRefer     SIPMethod = 12 // REFER [RFC3515]
	SIPMethodMessage   SIPMethod = 13 // MESSAGE [RFC3428]
	SIPMethodUpdate    SIPMethod = 14 // UPDATE [RFC3311]
	SIPMethodPing      SIPMethod = 15 // PING [https://tools.ietf.org/html/draft-fwmiller-ping-03]
)
+
+func (sm SIPMethod) String() string {
+ switch sm {
+ default:
+ return "Unknown method"
+ case SIPMethodInvite:
+ return "INVITE"
+ case SIPMethodAck:
+ return "ACK"
+ case SIPMethodBye:
+ return "BYE"
+ case SIPMethodCancel:
+ return "CANCEL"
+ case SIPMethodOptions:
+ return "OPTIONS"
+ case SIPMethodRegister:
+ return "REGISTER"
+ case SIPMethodPrack:
+ return "PRACK"
+ case SIPMethodSubscribe:
+ return "SUBSCRIBE"
+ case SIPMethodNotify:
+ return "NOTIFY"
+ case SIPMethodPublish:
+ return "PUBLISH"
+ case SIPMethodInfo:
+ return "INFO"
+ case SIPMethodRefer:
+ return "REFER"
+ case SIPMethodMessage:
+ return "MESSAGE"
+ case SIPMethodUpdate:
+ return "UPDATE"
+ case SIPMethodPing:
+ return "PING"
+ }
+}
+
+// GetSIPMethod returns the constant of a SIP method
+// from its string
+func GetSIPMethod(method string) (SIPMethod, error) {
+ switch strings.ToUpper(method) {
+ case "INVITE":
+ return SIPMethodInvite, nil
+ case "ACK":
+ return SIPMethodAck, nil
+ case "BYE":
+ return SIPMethodBye, nil
+ case "CANCEL":
+ return SIPMethodCancel, nil
+ case "OPTIONS":
+ return SIPMethodOptions, nil
+ case "REGISTER":
+ return SIPMethodRegister, nil
+ case "PRACK":
+ return SIPMethodPrack, nil
+ case "SUBSCRIBE":
+ return SIPMethodSubscribe, nil
+ case "NOTIFY":
+ return SIPMethodNotify, nil
+ case "PUBLISH":
+ return SIPMethodPublish, nil
+ case "INFO":
+ return SIPMethodInfo, nil
+ case "REFER":
+ return SIPMethodRefer, nil
+ case "MESSAGE":
+ return SIPMethodMessage, nil
+ case "UPDATE":
+ return SIPMethodUpdate, nil
+ case "PING":
+ return SIPMethodPing, nil
+ default:
+ return 0, fmt.Errorf("Unknown SIP method: '%s'", method)
+ }
+}
+
// Here is a correspondence between long header names and their compact
// single-letter forms, as defined in RFC 3261 section 20 (and extension
// RFCs for accept-contact, allow-events, etc.). Keys are lowercase long
// names; values are the compact forms.
var compactSipHeadersCorrespondance = map[string]string{
	"accept-contact":      "a",
	"allow-events":        "u",
	"call-id":             "i",
	"contact":             "m",
	"content-encoding":    "e",
	"content-length":      "l",
	"content-type":        "c",
	"event":               "o",
	"from":                "f",
	"identity":            "y",
	"refer-to":            "r",
	"referred-by":         "b",
	"reject-contact":      "j",
	"request-disposition": "d",
	"session-expires":     "x",
	"subject":             "s",
	"supported":           "k",
	"to":                  "t",
	"via":                 "v",
}
+
// SIP object will contains information about decoded SIP packet.
// -> The SIP Version
// -> The SIP Headers (in a map[string][]string because of multiple headers with the same name)
// -> The SIP Method
// -> The SIP Response code (if it's a response)
// -> The SIP Status line (if it's a response)
// You can easily know the type of the packet with the IsResponse boolean.
type SIP struct {
	BaseLayer

	// Base information
	Version SIPVersion
	Method  SIPMethod
	// Headers maps lowercase header names to every value seen for that
	// name, in packet order.
	Headers map[string][]string

	// Request
	RequestURI string

	// Response
	IsResponse     bool
	ResponseCode   int
	ResponseStatus string

	// Private fields
	cseq             int64  // parsed integer part of the CSeq header
	contentLength    int64  // parsed Content-Length header
	lastHeaderParsed string // lowercase name of the most recent header, for multiline continuation
}
+
+// decodeSIP decodes the byte slice into a SIP type. It also
+// setups the application Layer in PacketBuilder.
+func decodeSIP(data []byte, p gopacket.PacketBuilder) error {
+ s := NewSIP()
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ p.SetApplicationLayer(s)
+ return nil
+}
+
+// NewSIP instantiates a new empty SIP object
+func NewSIP() *SIP {
+ s := new(SIP)
+ s.Headers = make(map[string][]string)
+ return s
+}
+
// LayerType returns gopacket.LayerTypeSIP.
func (s *SIP) LayerType() gopacket.LayerType {
	return LayerTypeSIP
}
+
// Payload returns the base layer payload (everything after the blank
// line that terminates the headers, e.g. an SDP body).
func (s *SIP) Payload() []byte {
	return s.BaseLayer.Payload
}
+
// CanDecode returns the set of layer types that this DecodingLayer can decode.
func (s *SIP) CanDecode() gopacket.LayerClass {
	return LayerTypeSIP
}
+
// NextLayerType returns the layer type contained by this DecodingLayer
// (always opaque payload; SDP is not decoded further).
func (s *SIP) NextLayerType() gopacket.LayerType {
	return gopacket.LayerTypePayload
}
+
// DecodeFromBytes decodes the slice into the SIP struct. It reads the
// message line by line: the first line is the request/response line, the
// following lines are headers, and the first empty line ends the headers
// — everything after it is stored as the layer payload.
func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {

	// Init some vars for parsing follow-up
	var countLines int
	var line []byte
	var err error

	// Clean leading new line
	data = bytes.Trim(data, "\n")

	// Iterate on all lines of the SIP Headers
	// and stop when we reach the SDP (aka when the new line
	// is at index 0 of the remaining packet)
	buffer := bytes.NewBuffer(data)

	for {

		// Read next line; io.EOF simply means there is no body.
		line, err = buffer.ReadBytes(byte('\n'))
		if err != nil {
			if err == io.EOF {
				break
			} else {
				return err
			}
		}

		// Trim the new line delimiters
		line = bytes.Trim(line, "\r\n")

		// Empty line, we hit Body
		// Putting packet remain in Paypload
		if len(line) == 0 {
			s.BaseLayer.Payload = buffer.Bytes()
			break
		}

		// First line is the SIP request/response line
		// Other lines are headers
		if countLines == 0 {
			err = s.ParseFirstLine(line)
			if err != nil {
				return err
			}

		} else {
			err = s.ParseHeader(line)
			if err != nil {
				return err
			}
		}

		countLines++
	}

	return nil
}
+
+// ParseFirstLine will compute the first line of a SIP packet.
+// The first line will tell us if it's a request or a response.
+//
+// Examples of first line of SIP Prococol :
+//
+// Request : INVITE bob@example.com SIP/2.0
+// Response : SIP/2.0 200 OK
+// Response : SIP/2.0 501 Not Implemented
+//
+func (s *SIP) ParseFirstLine(firstLine []byte) error {
+
+ var err error
+
+ // Splits line by space
+ splits := strings.SplitN(string(firstLine), " ", 3)
+
+ // We must have at least 3 parts
+ if len(splits) < 3 {
+ return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine))
+ }
+
+ // Determine the SIP packet type
+ if strings.HasPrefix(splits[0], "SIP") {
+
+ // --> Response
+ s.IsResponse = true
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[0])
+ if err != nil {
+ return err
+ }
+
+ // Compute code
+ s.ResponseCode, err = strconv.Atoi(splits[1])
+ if err != nil {
+ return err
+ }
+
+ // Compute status line
+ s.ResponseStatus = splits[2]
+
+ } else {
+
+ // --> Request
+
+ // Validate method
+ s.Method, err = GetSIPMethod(splits[0])
+ if err != nil {
+ return err
+ }
+
+ s.RequestURI = splits[1]
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[2])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseHeader will parse a SIP Header
+// SIP Headers are quite simple, there are colon separated name and value
+// Headers can be spread over multiple lines
+//
+// Examples of header :
+//
+// CSeq: 1 REGISTER
+// Via: SIP/2.0/UDP there.com:5060
+// Authorization:Digest username="UserB",
+// realm="MCI WorldCom SIP",
+// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="",
+// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824"
+//
+func (s *SIP) ParseHeader(header []byte) (err error) {
+
+ // Ignore empty headers
+ if len(header) == 0 {
+ return
+ }
+
+ // Check if this is the following of last header
+ // RFC 3261 - 7.3.1 - Header Field Format specify that following lines of
+ // multiline headers must begin by SP or TAB
+ if header[0] == '\t' || header[0] == ' ' {
+
+ header = bytes.TrimSpace(header)
+ s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header))
+ return
+ }
+
+ // Find the ':' to separate header name and value
+ index := bytes.Index(header, []byte(":"))
+ if index >= 0 {
+
+ headerName := strings.ToLower(string(bytes.Trim(header[:index], " ")))
+ headerValue := string(bytes.Trim(header[index+1:], " "))
+
+ // Add header to object
+ s.Headers[headerName] = append(s.Headers[headerName], headerValue)
+ s.lastHeaderParsed = headerName
+
+ // Compute specific headers
+ err = s.ParseSpecificHeaders(headerName, headerValue)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// ParseSpecificHeaders will parse some specific key values from
// specific headers like CSeq or Content-Length integer values.
func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) {

	switch headerName {
	case "cseq":

		// CSeq header value is formatted like that :
		// CSeq: 123 INVITE
		// We split the value to parse Cseq integer value, and method
		splits := strings.Split(headerValue, " ")
		if len(splits) > 1 {

			// Parse Cseq
			s.cseq, err = strconv.ParseInt(splits[0], 10, 64)
			if err != nil {
				return err
			}

			// Validate method: for a response the request line carries no
			// method, so the CSeq header is the authoritative source.
			if s.IsResponse {
				s.Method, err = GetSIPMethod(splits[1])
				if err != nil {
					return err
				}
			}
		}

	case "content-length":

		// Parse Content-Length
		s.contentLength, err = strconv.ParseInt(headerValue, 10, 64)
		if err != nil {
			return err
		}
	}

	return nil
}
+
// GetAllHeaders will return the full headers of the
// current SIP packets in a map[string][]string (the live map, not a copy).
func (s *SIP) GetAllHeaders() map[string][]string {
	return s.Headers
}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+ headerName = strings.ToLower(headerName)
+ h := make([]string, 0)
+ if _, ok := s.Headers[headerName]; ok {
+ if len(s.Headers[headerName]) > 0 {
+ return s.Headers[headerName]
+ } else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+ return s.Headers[compactSipHeadersCorrespondance[headerName]]
+ }
+ }
+ return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+ headerName = strings.ToLower(headerName)
+ if _, ok := s.Headers[headerName]; ok {
+ if len(s.Headers[headerName]) > 0 {
+ return s.Headers[headerName][0]
+ } else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+ return s.Headers[compactSipHeadersCorrespondance[headerName]][0]
+ }
+ }
+ return ""
+}
+
+//
+// Some handy getters for most used SIP headers
+//
+
// GetAuthorization will return the first Authorization
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetAuthorization() string {
	return s.GetFirstHeader("Authorization")
}
+
// GetFrom will return the first From
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetFrom() string {
	return s.GetFirstHeader("From")
}
+
// GetTo will return the first To
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetTo() string {
	return s.GetFirstHeader("To")
}
+
// GetContact will return the first Contact
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetContact() string {
	return s.GetFirstHeader("Contact")
}
+
// GetCallID will return the first Call-ID
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetCallID() string {
	return s.GetFirstHeader("Call-ID")
}
+
// GetUserAgent will return the first User-Agent
// header of the current SIP packet, or "" if absent.
func (s *SIP) GetUserAgent() string {
	return s.GetFirstHeader("User-Agent")
}
+
// GetContentLength will return the parsed integer
// Content-Length header of the current SIP packet (0 if not seen).
func (s *SIP) GetContentLength() int64 {
	return s.contentLength
}
+
// GetCSeq will return the parsed integer part of the CSeq
// header of the current SIP packet (0 if not seen).
func (s *SIP) GetCSeq() int64 {
	return s.cseq
}
diff --git a/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/google/gopacket/layers/stp.go
new file mode 100644
index 0000000..bde7d7c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/stp.go
@@ -0,0 +1,27 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
// STP decode spanning tree protocol packets to transport BPDU (bridge
// protocol data unit) messages. The BPDU fields are not yet parsed; the
// raw bytes are kept in BaseLayer.Contents.
type STP struct {
	BaseLayer
}
+
// LayerType returns gopacket.LayerTypeSTP.
func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+ stp := &STP{}
+ stp.Contents = data[:]
+ // TODO: parse the STP protocol into actual subfields.
+ p.AddLayer(stp)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/google/gopacket/layers/tcp.go
new file mode 100644
index 0000000..6b37f56
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcp.go
@@ -0,0 +1,337 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
// TCP is the layer for TCP headers.
type TCP struct {
	BaseLayer
	SrcPort, DstPort TCPPort
	Seq              uint32
	Ack              uint32
	// DataOffset is the header length in 32-bit words (>= 5).
	DataOffset                                 uint8
	FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool
	Window                                     uint16
	Checksum                                   uint16
	Urgent                                     uint16
	// sPort/dPort keep the raw 2-byte port slices for TransportFlow.
	sPort, dPort []byte
	Options      []TCPOption
	Padding      []byte
	// opts is scratch storage so short option lists avoid a heap allocation.
	opts [4]TCPOption
	tcpipchecksum
}
+
// TCPOptionKind represents a TCP option code.
type TCPOptionKind uint8

// TCP option kinds as assigned by IANA. Note: these constants are untyped
// so they compare freely against both bytes and TCPOptionKind values.
const (
	TCPOptionKindEndList                         = 0
	TCPOptionKindNop                             = 1
	TCPOptionKindMSS                             = 2  // len = 4
	TCPOptionKindWindowScale                     = 3  // len = 3
	TCPOptionKindSACKPermitted                   = 4  // len = 2
	TCPOptionKindSACK                            = 5  // len = n
	TCPOptionKindEcho                            = 6  // len = 6, obsolete
	TCPOptionKindEchoReply                       = 7  // len = 6, obsolete
	TCPOptionKindTimestamps                      = 8  // len = 10
	TCPOptionKindPartialOrderConnectionPermitted = 9  // len = 2, obsolete
	TCPOptionKindPartialOrderServiceProfile      = 10 // len = 3, obsolete
	TCPOptionKindCC                              = 11 // obsolete
	TCPOptionKindCCNew                           = 12 // obsolete
	TCPOptionKindCCEcho                          = 13 // obsolete
	TCPOptionKindAltChecksum                     = 14 // len = 3, obsolete
	TCPOptionKindAltChecksumData                 = 15 // len = n, obsolete
)
+
+func (k TCPOptionKind) String() string {
+ switch k {
+ case TCPOptionKindEndList:
+ return "EndList"
+ case TCPOptionKindNop:
+ return "NOP"
+ case TCPOptionKindMSS:
+ return "MSS"
+ case TCPOptionKindWindowScale:
+ return "WindowScale"
+ case TCPOptionKindSACKPermitted:
+ return "SACKPermitted"
+ case TCPOptionKindSACK:
+ return "SACK"
+ case TCPOptionKindEcho:
+ return "Echo"
+ case TCPOptionKindEchoReply:
+ return "EchoReply"
+ case TCPOptionKindTimestamps:
+ return "Timestamps"
+ case TCPOptionKindPartialOrderConnectionPermitted:
+ return "PartialOrderConnectionPermitted"
+ case TCPOptionKindPartialOrderServiceProfile:
+ return "PartialOrderServiceProfile"
+ case TCPOptionKindCC:
+ return "CC"
+ case TCPOptionKindCCNew:
+ return "CCNew"
+ case TCPOptionKindCCEcho:
+ return "CCEcho"
+ case TCPOptionKindAltChecksum:
+ return "AltChecksum"
+ case TCPOptionKindAltChecksumData:
+ return "AltChecksumData"
+ default:
+ return fmt.Sprintf("Unknown(%d)", k)
+ }
+}
+
// TCPOption is a single TCP option as found in the header.
// OptionLength includes the kind and length bytes; OptionData holds only
// the payload (OptionLength - 2 bytes).
type TCPOption struct {
	OptionType   TCPOptionKind
	OptionLength uint8
	OptionData   []byte
}
+
+func (t TCPOption) String() string {
+ hd := hex.EncodeToString(t.OptionData)
+ if len(hd) > 0 {
+ hd = " 0x" + hd
+ }
+ switch t.OptionType {
+ case TCPOptionKindMSS:
+ return fmt.Sprintf("TCPOption(%s:%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint16(t.OptionData),
+ hd)
+
+ case TCPOptionKindTimestamps:
+ if len(t.OptionData) == 8 {
+ return fmt.Sprintf("TCPOption(%s:%v/%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint32(t.OptionData[:4]),
+ binary.BigEndian.Uint32(t.OptionData[4:8]),
+ hd)
+ }
+ }
+ return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd)
+}
+
// LayerType returns gopacket.LayerTypeTCP.
func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP }
+
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
//
// With opts.FixLengths set, Padding and DataOffset are recomputed from
// the option list; with opts.ComputeChecksums set, the checksum is
// computed over the pseudo-header + segment (requires a prior call to
// SetNetworkLayerForChecksum).
func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
	var optionLength int
	for _, o := range t.Options {
		switch o.OptionType {
		case 0, 1:
			// EndList and NOP are single-byte options with no length field.
			optionLength += 1
		default:
			optionLength += 2 + len(o.OptionData)
		}
	}
	if opts.FixLengths {
		// Pad the options to a 32-bit boundary and recompute the header
		// length in 32-bit words.
		if rem := optionLength % 4; rem != 0 {
			t.Padding = lotsOfZeros[:4-rem]
		}
		t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4)
	}
	bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding))
	if err != nil {
		return err
	}
	binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort))
	binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort))
	binary.BigEndian.PutUint32(bytes[4:], t.Seq)
	binary.BigEndian.PutUint32(bytes[8:], t.Ack)
	binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset())
	binary.BigEndian.PutUint16(bytes[14:], t.Window)
	binary.BigEndian.PutUint16(bytes[18:], t.Urgent)
	start := 20
	for _, o := range t.Options {
		bytes[start] = byte(o.OptionType)
		switch o.OptionType {
		case 0, 1:
			start++
		default:
			if opts.FixLengths {
				o.OptionLength = uint8(len(o.OptionData) + 2)
			}
			bytes[start+1] = o.OptionLength
			copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData)
			start += len(o.OptionData) + 2
		}
	}
	copy(bytes[start:], t.Padding)
	if opts.ComputeChecksums {
		// zero out checksum bytes in current serialization.
		bytes[16] = 0
		bytes[17] = 0
		csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP)
		if err != nil {
			return err
		}
		t.Checksum = csum
	}
	binary.BigEndian.PutUint16(bytes[16:], t.Checksum)
	return nil
}
+
// ComputeChecksum computes the TCP checksum over the decoded header and
// payload bytes (requires a prior SetNetworkLayerForChecksum call).
func (t *TCP) ComputeChecksum() (uint16, error) {
	return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP)
}
+
+func (t *TCP) flagsAndOffset() uint16 {
+ f := uint16(t.DataOffset) << 12
+ if t.FIN {
+ f |= 0x0001
+ }
+ if t.SYN {
+ f |= 0x0002
+ }
+ if t.RST {
+ f |= 0x0004
+ }
+ if t.PSH {
+ f |= 0x0008
+ }
+ if t.ACK {
+ f |= 0x0010
+ }
+ if t.URG {
+ f |= 0x0020
+ }
+ if t.ECE {
+ f |= 0x0040
+ }
+ if t.CWR {
+ f |= 0x0080
+ }
+ if t.NS {
+ f |= 0x0100
+ }
+ return f
+}
+
+func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data))
+ }
+ tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2]))
+ tcp.sPort = data[0:2]
+ tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4]))
+ tcp.dPort = data[2:4]
+ tcp.Seq = binary.BigEndian.Uint32(data[4:8])
+ tcp.Ack = binary.BigEndian.Uint32(data[8:12])
+ tcp.DataOffset = data[12] >> 4
+ tcp.FIN = data[13]&0x01 != 0
+ tcp.SYN = data[13]&0x02 != 0
+ tcp.RST = data[13]&0x04 != 0
+ tcp.PSH = data[13]&0x08 != 0
+ tcp.ACK = data[13]&0x10 != 0
+ tcp.URG = data[13]&0x20 != 0
+ tcp.ECE = data[13]&0x40 != 0
+ tcp.CWR = data[13]&0x80 != 0
+ tcp.NS = data[12]&0x01 != 0
+ tcp.Window = binary.BigEndian.Uint16(data[14:16])
+ tcp.Checksum = binary.BigEndian.Uint16(data[16:18])
+ tcp.Urgent = binary.BigEndian.Uint16(data[18:20])
+ if tcp.Options == nil {
+ // Pre-allocate to avoid allocating a slice.
+ tcp.Options = tcp.opts[:0]
+ } else {
+ tcp.Options = tcp.Options[:0]
+ }
+ if tcp.DataOffset < 5 {
+ return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset)
+ }
+ dataStart := int(tcp.DataOffset) * 4
+ if dataStart > len(data) {
+ df.SetTruncated()
+ tcp.Payload = nil
+ tcp.Contents = data
+ return errors.New("TCP data offset greater than packet length")
+ }
+ tcp.Contents = data[:dataStart]
+ tcp.Payload = data[dataStart:]
+ // From here on, data points just to the header options.
+ data = data[20:dataStart]
+ for len(data) > 0 {
+ tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])})
+ opt := &tcp.Options[len(tcp.Options)-1]
+ switch opt.OptionType {
+ case TCPOptionKindEndList: // End of options
+ opt.OptionLength = 1
+ tcp.Padding = data[1:]
+ break
+ case TCPOptionKindNop: // 1 byte padding
+ opt.OptionLength = 1
+ default:
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data))
+ }
+ opt.OptionLength = data[1]
+ if opt.OptionLength < 2 {
+ return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength)
+ } else if int(opt.OptionLength) > len(data) {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data))
+ }
+ opt.OptionData = data[2:opt.OptionLength]
+ }
+ data = data[opt.OptionLength:]
+ }
+ return nil
+}
+
// CanDecode returns the set of layer types this DecodingLayer can decode.
func (t *TCP) CanDecode() gopacket.LayerClass {
	return LayerTypeTCP
}
+
// NextLayerType guesses the payload layer from the destination port,
// falling back to the source port, and finally to opaque payload.
func (t *TCP) NextLayerType() gopacket.LayerType {
	lt := t.DstPort.LayerType()
	if lt == gopacket.LayerTypePayload {
		lt = t.SrcPort.LayerType()
	}
	return lt
}
+
+func decodeTCP(data []byte, p gopacket.PacketBuilder) error {
+ tcp := &TCP{}
+ err := tcp.DecodeFromBytes(data, p)
+ p.AddLayer(tcp)
+ p.SetTransportLayer(tcp)
+ if err != nil {
+ return err
+ }
+ if p.DecodeOptions().DecodeStreamsAsDatagrams {
+ return p.NextDecoder(tcp.NextLayerType())
+ } else {
+ return p.NextDecoder(gopacket.LayerTypePayload)
+ }
+}
+
// TransportFlow returns the TCP port flow built from the raw source and
// destination port bytes captured during decoding.
func (t *TCP) TransportFlow() gopacket.Flow {
	return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort)
}
+
// SetInternalPortsForTesting populates the private raw port slices from
// SrcPort/DstPort. For testing only — decoding normally fills these.
func (t *TCP) SetInternalPortsForTesting() {
	t.sPort = make([]byte, 2)
	t.dPort = make([]byte, 2)
	binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort))
	binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort))
}
diff --git a/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/google/gopacket/layers/tcpip.go
new file mode 100644
index 0000000..64ba51c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcpip.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
// Checksum computation for TCP/UDP. Embedded by the TCP and UDP layers
// to share pseudo-header checksum logic.
type tcpipchecksum struct {
	// pseudoheader is set by SetNetworkLayerForChecksum and supplies the
	// IPv4/IPv6 address contribution to the checksum.
	pseudoheader tcpipPseudoHeader
}

// tcpipPseudoHeader is implemented by *IPv4 and *IPv6.
type tcpipPseudoHeader interface {
	pseudoheaderChecksum() (uint32, error)
}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+ if err := ip.AddressTo4(); err != nil {
+ return 0, err
+ }
+ csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+ csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+ csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+ csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+ return csum, nil
+}
+
// pseudoheaderChecksum sums the 16-byte source and destination addresses
// as big-endian 16-bit words, the IPv6 contribution to the TCP/UDP
// pseudo-header checksum.
func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
	if err := ip.AddressTo16(); err != nil {
		return 0, err
	}
	for i := 0; i < 16; i += 2 {
		csum += uint32(ip.SrcIP[i]) << 8
		csum += uint32(ip.SrcIP[i+1])
		csum += uint32(ip.DstIP[i]) << 8
		csum += uint32(ip.DstIP[i+1])
	}
	return csum, nil
}
+
// tcpipChecksum calculates the Internet checksum defined in RFC 1071 over
// data, seeded with csum (any pre-computed pseudo-header contribution).
// Odd-length data is padded with a virtual trailing zero byte; the 32-bit
// accumulator is folded back into 16 bits and complemented.
func tcpipChecksum(data []byte, csum uint32) uint16 {
	n := len(data)
	// Sum full 16-bit words (manual big-endian assembly is faster here
	// than binary.BigEndian.Uint16 on this hot path).
	for i := 0; i+1 < n; i += 2 {
		csum += uint32(data[i])<<8 | uint32(data[i+1])
	}
	// A trailing odd byte occupies the high half of its word.
	if n%2 == 1 {
		csum += uint32(data[n-1]) << 8
	}
	// Fold the carries back in until the sum fits in 16 bits.
	for csum > 0xffff {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum)
}
+
// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
// serialized TCP or UDP header plus its payload, with the checksum zero'd
// out. headerProtocol is the IP protocol number of the upper-layer header.
// Fails if SetNetworkLayerForChecksum has not been called first.
func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
	if c.pseudoheader == nil {
		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
	}
	length := uint32(len(headerAndPayload))
	csum, err := c.pseudoheader.pseudoheaderChecksum()
	if err != nil {
		return 0, err
	}
	// Remaining pseudo-header fields: protocol number and segment length
	// (folded into 16-bit words).
	csum += uint32(headerProtocol)
	csum += length & 0xffff
	csum += length >> 16
	return tcpipChecksum(headerAndPayload, csum), nil
}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depends on fields in the IPv4 or IPv6 layer that contains it.
+// The passed in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+ switch v := l.(type) {
+ case *IPv4:
+ i.pseudoheader = v
+ case *IPv6:
+ i.pseudoheader = v
+ default:
+ return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/google/gopacket/layers/test_creator.py
new file mode 100644
index 0000000..c92d276
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+
+"""TestCreator creates test templates from pcap files."""
+
+import argparse
+import base64
+import glob
+import re
+import string
+import subprocess
+import sys
+
+
class Packet(object):
  """Helper class encapsulating packet from a pcap file."""

  def __init__(self, packet_lines):
    """Stores the raw tcpdump text lines and the decoded packet bytes.

    Args:
      packet_lines: list of tcpdump -XX output lines for one packet; the
        first line is the timestamp/summary line, the rest are hex dumps.
    """
    self.packet_lines = packet_lines
    self.data = self._DecodeText(packet_lines)

  @classmethod
  def _DecodeText(cls, packet_lines):
    """Converts tcpdump hex-dump lines to a raw byte string (Python 2 str)."""
    packet_bytes = []
    # First line is timestamp and stuff, skip it.
    # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?.........

    for line in packet_lines[1:]:
      # Capture the hex column; groups of 2-4 hex digits separated by spaces.
      m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE)
      if m is None: continue
      for hexpart in m.group(1).split():
        packet_bytes.append(base64.b16decode(hexpart.upper()))
    return ''.join(packet_bytes)

  def Test(self, name, link_type):
    """Yields a test using this packet, as a set of lines."""
    # Emit the original dump as a comment, the byte-array literal, a
    # decode test, and a decode benchmark. The caller fills in
    # FILL_ME_IN_WITH_ACTUAL_LAYERS by hand.
    yield '// testPacket%s is the packet:' % name
    for line in self.packet_lines:
      yield '//   ' + line
    yield 'var testPacket%s = []byte{' % name
    data = list(self.data)
    while data:
      linebytes, data = data[:16], data[16:]
      yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes])
    yield '}'
    yield 'func TestPacket%s(t *testing.T) {' % name
    yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type)
    yield '\tif p.ErrorLayer() != nil {'
    yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())'
    yield '\t}'
    yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type
    yield '}'
    yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name
    yield '\tfor i := 0; i < b.N; i++ {'
    yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type)
    yield '\t}'
    yield '}'
+
+
+
def GetTcpdumpOutput(filename):
  """Runs tcpdump on the given file, returning output as string.

  Requires the tcpdump binary on PATH; raises CalledProcessError on a
  non-zero exit status.
  """
  return subprocess.check_output(
      ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename])
+
+
def TcpdumpOutputToPackets(output):
  """Reads tcpdump -XX output, yielding Packet objects.

  A line that starts at column 0 begins a new packet; its indented
  follow-up lines are the hex dump.

  Fix: empty output lines previously raised IndexError on line[0]; they
  are now kept with the current packet's lines instead of crashing.
  """
  pdata = []
  for line in output.splitlines():
    if line and line[0] not in string.whitespace and pdata:
      yield Packet(pdata)
      pdata = []
    pdata.append(line)
  if pdata:
    yield Packet(pdata)
+
+
def main():
  # NOTE(review): this script is Python 2 only (print statement below,
  # and Packet._DecodeText returns a py2 str of bytes).
  class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
    # Prepend a usage blurb to argparse's default help output.
    def _format_usage(self, usage, actions, groups, prefix=None):
      header =('TestCreator creates gopacket tests using a pcap file.\n\n'
               'Tests are written to standard out... they can then be \n'
               'copied into the file of your choice and modified as \n'
               'you see.\n\n')
      return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
        self, usage, actions, groups, prefix)

  parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
  parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
  parser.add_argument('--name', default='Packet%d', help='the layer type, must have "%d" inside it')
  parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')

  args = parser.parse_args()

  # Each positional argument may itself be a glob pattern; every matching
  # pcap is dumped via tcpdump and turned into numbered test templates.
  for arg in args.files:
    for path in glob.glob(arg):
      for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
        print '\n'.join(packet.Test(
            args.name % i, args.link_type))
diff --git a/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/google/gopacket/layers/tls.go
new file mode 100644
index 0000000..ddb6ff9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls.go
@@ -0,0 +1,208 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+ TLSChangeCipherSpec TLSType = 20
+ TLSAlert TLSType = 21
+ TLSHandshake TLSType = 22
+ TLSApplicationData TLSType = 23
+ TLSUnknown TLSType = 255
+)
+
+// String shows the record type nicely formatted
+func (tt TLSType) String() string {
+ switch tt {
+ default:
+ return "Unknown"
+ case TLSChangeCipherSpec:
+ return "Change Cipher Spec"
+ case TLSAlert:
+ return "Alert"
+ case TLSHandshake:
+ return "Handshake"
+ case TLSApplicationData:
+ return "Application Data"
+ }
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+ switch tv {
+ default:
+ return "Unknown"
+ case 0x0200:
+ return "SSL 2.0"
+ case 0x0300:
+ return "SSL 3.0"
+ case 0x0301:
+ return "TLS 1.0"
+ case 0x0302:
+ return "TLS 1.1"
+ case 0x0303:
+ return "TLS 1.2"
+ case 0x0304:
+ return "TLS 1.3"
+ }
+}
+
+// TLS is specified in RFC 5246
+//
+// TLS Record Protocol
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Content Type |
+// +--+--+--+--+--+--+--+--+
+// | Version (major) |
+// +--+--+--+--+--+--+--+--+
+// | Version (minor) |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLS record structures
+type TLS struct {
+ BaseLayer
+
+ // TLS Records
+ ChangeCipherSpec []TLSChangeCipherSpecRecord
+ Handshake []TLSHandshakeRecord
+ AppData []TLSAppDataRecord
+ Alert []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS Record type should have
+type TLSRecordHeader struct {
+ ContentType TLSType
+ Version TLSVersion
+ Length uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application Layer in PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error {
+ t := &TLS{}
+ err := t.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(t)
+ p.SetApplicationLayer(t)
+ return nil
+}
+
+// DecodeFromBytes decodes the slice into the TLS struct.
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ t.BaseLayer.Contents = data
+ t.BaseLayer.Payload = nil
+
+ t.ChangeCipherSpec = t.ChangeCipherSpec[:0]
+ t.Handshake = t.Handshake[:0]
+ t.AppData = t.AppData[:0]
+ t.Alert = t.Alert[:0]
+
+ return t.decodeTLSRecords(data, df)
+}
+
+func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 5 {
+ df.SetTruncated()
+ return errors.New("TLS record too short")
+ }
+
+ // since there are no further layers, the baselayer's content is
+ // pointing to this layer
+ t.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+ var h TLSRecordHeader
+ h.ContentType = TLSType(data[0])
+ h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3]))
+ h.Length = binary.BigEndian.Uint16(data[3:5])
+
+ if h.ContentType.String() == "Unknown" {
+ return errors.New("Unknown TLS record type")
+ }
+
+ hl := 5 // header length
+ tl := hl + int(h.Length)
+ if len(data) < tl {
+ df.SetTruncated()
+ return errors.New("TLS packet length mismatch")
+ }
+
+ switch h.ContentType {
+ default:
+ return errors.New("Unknown TLS record type")
+ case TLSChangeCipherSpec:
+ var r TLSChangeCipherSpecRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.ChangeCipherSpec = append(t.ChangeCipherSpec, r)
+ case TLSAlert:
+ var r TLSAlertRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Alert = append(t.Alert, r)
+ case TLSHandshake:
+ var r TLSHandshakeRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Handshake = append(t.Handshake, r)
+ case TLSApplicationData:
+ var r TLSAppDataRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.AppData = append(t.AppData, r)
+ }
+
+ if len(data) == tl {
+ return nil
+ }
+ return t.decodeTLSRecords(data[tl:len(data)], df)
+}
+
+// CanDecode implements gopacket.DecodingLayer.
+func (t *TLS) CanDecode() gopacket.LayerClass {
+ return LayerTypeTLS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (t *TLS) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord
+func (t *TLS) Payload() []byte {
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/google/gopacket/layers/tls_alert.go
new file mode 100644
index 0000000..0c5aee0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_alert.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+ TLSAlertWarning TLSAlertLevel = 1
+ TLSAlertFatal TLSAlertLevel = 2
+ TLSAlertUnknownLevel TLSAlertLevel = 255
+
+ TLSAlertCloseNotify TLSAlertDescr = 0
+ TLSAlertUnexpectedMessage TLSAlertDescr = 10
+ TLSAlertBadRecordMac TLSAlertDescr = 20
+ TLSAlertDecryptionFailedRESERVED TLSAlertDescr = 21
+ TLSAlertRecordOverflow TLSAlertDescr = 22
+ TLSAlertDecompressionFailure TLSAlertDescr = 30
+ TLSAlertHandshakeFailure TLSAlertDescr = 40
+ TLSAlertNoCertificateRESERVED TLSAlertDescr = 41
+ TLSAlertBadCertificate TLSAlertDescr = 42
+ TLSAlertUnsupportedCertificate TLSAlertDescr = 43
+ TLSAlertCertificateRevoked TLSAlertDescr = 44
+ TLSAlertCertificateExpired TLSAlertDescr = 45
+ TLSAlertCertificateUnknown TLSAlertDescr = 46
+ TLSAlertIllegalParameter TLSAlertDescr = 47
+ TLSAlertUnknownCa TLSAlertDescr = 48
+ TLSAlertAccessDenied TLSAlertDescr = 49
+ TLSAlertDecodeError TLSAlertDescr = 50
+ TLSAlertDecryptError TLSAlertDescr = 51
+ TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+ TLSAlertProtocolVersion TLSAlertDescr = 70
+ TLSAlertInsufficientSecurity TLSAlertDescr = 71
+ TLSAlertInternalError TLSAlertDescr = 80
+ TLSAlertUserCanceled TLSAlertDescr = 90
+ TLSAlertNoRenegotiation TLSAlertDescr = 100
+ TLSAlertUnsupportedExtension TLSAlertDescr = 110
+ TLSAlertUnknownDescription TLSAlertDescr = 255
+)
+
+// TLS Alert
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Level |
+// +--+--+--+--+--+--+--+--+
+// | Description |
+// +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+ TLSRecordHeader
+
+ Level TLSAlertLevel
+ Description TLSAlertDescr
+
+ EncryptedMsg []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) < 2 {
+ df.SetTruncated()
+ return errors.New("TLS Alert packet too short")
+ }
+
+ if t.Length == 2 {
+ t.Level = TLSAlertLevel(data[0])
+ t.Description = TLSAlertDescr(data[1])
+ } else {
+ t.Level = TLSAlertUnknownLevel
+ t.Description = TLSAlertUnknownDescription
+ t.EncryptedMsg = data
+ }
+
+ return nil
+}
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+ switch al {
+ default:
+ return fmt.Sprintf("Unknown(%d)", al)
+ case TLSAlertWarning:
+ return "Warning"
+ case TLSAlertFatal:
+ return "Fatal"
+ }
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+ switch ad {
+ default:
+ return "Unknown"
+ case TLSAlertCloseNotify:
+ return "close_notify"
+ case TLSAlertUnexpectedMessage:
+ return "unexpected_message"
+ case TLSAlertBadRecordMac:
+ return "bad_record_mac"
+ case TLSAlertDecryptionFailedRESERVED:
+ return "decryption_failed_RESERVED"
+ case TLSAlertRecordOverflow:
+ return "record_overflow"
+ case TLSAlertDecompressionFailure:
+ return "decompression_failure"
+ case TLSAlertHandshakeFailure:
+ return "handshake_failure"
+ case TLSAlertNoCertificateRESERVED:
+ return "no_certificate_RESERVED"
+ case TLSAlertBadCertificate:
+ return "bad_certificate"
+ case TLSAlertUnsupportedCertificate:
+ return "unsupported_certificate"
+ case TLSAlertCertificateRevoked:
+ return "certificate_revoked"
+ case TLSAlertCertificateExpired:
+ return "certificate_expired"
+ case TLSAlertCertificateUnknown:
+ return "certificate_unknown"
+ case TLSAlertIllegalParameter:
+ return "illegal_parameter"
+ case TLSAlertUnknownCa:
+ return "unknown_ca"
+ case TLSAlertAccessDenied:
+ return "access_denied"
+ case TLSAlertDecodeError:
+ return "decode_error"
+ case TLSAlertDecryptError:
+ return "decrypt_error"
+ case TLSAlertExportRestrictionRESERVED:
+ return "export_restriction_RESERVED"
+ case TLSAlertProtocolVersion:
+ return "protocol_version"
+ case TLSAlertInsufficientSecurity:
+ return "insufficient_security"
+ case TLSAlertInternalError:
+ return "internal_error"
+ case TLSAlertUserCanceled:
+ return "user_canceled"
+ case TLSAlertNoRenegotiation:
+ return "no_renegotiation"
+ case TLSAlertUnsupportedExtension:
+ return "unsupported_extension"
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/google/gopacket/layers/tls_appdata.go
new file mode 100644
index 0000000..dedd1d5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData Record type should have
+type TLSAppDataRecord struct {
+ TLSRecordHeader
+ Payload []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != int(t.Length) {
+ return errors.New("TLS Application Data length mismatch")
+ }
+
+ t.Payload = data
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 0000000..8f3dc62
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+ TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+ TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+// TLS Change Cipher Spec
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Message |
+// +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+ TLSRecordHeader
+
+ Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the slice into the TLSChangeCipherSpecRecord struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != 1 {
+ df.SetTruncated()
+ return errors.New("TLS Change Cipher Spec record incorrect length")
+ }
+
+ t.Message = TLSchangeCipherSpec(data[0])
+ if t.Message != TLSChangecipherspecMessage {
+ t.Message = TLSChangecipherspecUnknown
+ }
+
+ return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+ switch ccs {
+ default:
+ return "Unknown"
+ case TLSChangecipherspecMessage:
+ return "Change Cipher Spec Message"
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/google/gopacket/layers/tls_handshake.go
new file mode 100644
index 0000000..e45e2c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_handshake.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+ TLSRecordHeader
+}
+
+// decodeFromBytes decodes the slice into the TLSHandshakeRecord struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ // TODO
+
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/google/gopacket/layers/udp.go
new file mode 100644
index 0000000..97e81c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udp.go
@@ -0,0 +1,133 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+ BaseLayer
+ SrcPort, DstPort UDPPort
+ Length uint16
+ Checksum uint16
+ sPort, dPort []byte
+ tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+ }
+ udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+ udp.sPort = data[0:2]
+ udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+ udp.dPort = data[2:4]
+ udp.Length = binary.BigEndian.Uint16(data[4:6])
+ udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+ udp.BaseLayer = BaseLayer{Contents: data[:8]}
+ switch {
+ case udp.Length >= 8:
+ hlen := int(udp.Length)
+ if hlen > len(data) {
+ df.SetTruncated()
+ hlen = len(data)
+ }
+ udp.Payload = data[8:hlen]
+ case udp.Length == 0: // Jumbogram, use entire rest of data
+ udp.Payload = data[8:]
+ default:
+ return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+ }
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var jumbo bool
+
+ payload := b.Bytes()
+ if _, ok := u.pseudoheader.(*IPv6); ok {
+ if len(payload)+8 > 65535 {
+ jumbo = true
+ }
+ }
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+ if opts.FixLengths {
+ if jumbo {
+ u.Length = 0
+ } else {
+ u.Length = uint16(len(payload)) + 8
+ }
+ }
+ binary.BigEndian.PutUint16(bytes[4:], u.Length)
+ if opts.ComputeChecksums {
+ // zero out checksum bytes
+ bytes[6] = 0
+ bytes[7] = 0
+ csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+ if err != nil {
+ return err
+ }
+ u.Checksum = csum
+ }
+ binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+ return nil
+}
+
+func (u *UDP) CanDecode() gopacket.LayerClass {
+ return LayerTypeUDP
+}
+
+// NextLayerType use the destination port to select the
+// right next decoder. It tries first to decode via the
+// destination port, then the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType {
+ if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload {
+ return lt
+ }
+ return u.SrcPort.LayerType()
+}
+
+func decodeUDP(data []byte, p gopacket.PacketBuilder) error {
+ udp := &UDP{}
+ err := udp.DecodeFromBytes(data, p)
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(udp.NextLayerType())
+}
+
+func (u *UDP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort)
+}
+
+// For testing only
+func (u *UDP) SetInternalPortsForTesting() {
+ u.sPort = make([]byte, 2)
+ u.dPort = make([]byte, 2)
+ binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/google/gopacket/layers/udplite.go
new file mode 100644
index 0000000..7d84c51
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udplite.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// UDPLite is the layer for UDP-Lite headers (rfc 3828).
+type UDPLite struct {
+ BaseLayer
+ SrcPort, DstPort UDPLitePort
+ ChecksumCoverage uint16
+ Checksum uint16
+ sPort, dPort []byte
+}
+
+// LayerType returns gopacket.LayerTypeUDPLite
+func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite }
+
+func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error {
+ udp := &UDPLite{
+ SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])),
+ sPort: data[0:2],
+ DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])),
+ dPort: data[2:4],
+ ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]),
+ Checksum: binary.BigEndian.Uint16(data[6:8]),
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ }
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (u *UDPLite) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort)
+}
diff --git a/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/google/gopacket/layers/usb.go
new file mode 100644
index 0000000..0b4d4af
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/usb.go
@@ -0,0 +1,287 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+type USBEventType uint8
+
+const (
+ USBEventTypeSubmit USBEventType = 'S'
+ USBEventTypeComplete USBEventType = 'C'
+ USBEventTypeError USBEventType = 'E'
+)
+
+func (a USBEventType) String() string {
+ switch a {
+ case USBEventTypeSubmit:
+ return "SUBMIT"
+ case USBEventTypeComplete:
+ return "COMPLETE"
+ case USBEventTypeError:
+ return "ERROR"
+ default:
+ return "Unknown event type"
+ }
+}
+
+type USBRequestBlockSetupRequest uint8
+
+const (
+ USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00
+ USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01
+ USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03
+ USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05
+ USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06
+ USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07
+ USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08
+ USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09
+ USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a
+)
+
+func (a USBRequestBlockSetupRequest) String() string {
+ switch a {
+ case USBRequestBlockSetupRequestGetStatus:
+ return "GET_STATUS"
+ case USBRequestBlockSetupRequestClearFeature:
+ return "CLEAR_FEATURE"
+ case USBRequestBlockSetupRequestSetFeature:
+ return "SET_FEATURE"
+ case USBRequestBlockSetupRequestSetAddress:
+ return "SET_ADDRESS"
+ case USBRequestBlockSetupRequestGetDescriptor:
+ return "GET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestSetDescriptor:
+ return "SET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestGetConfiguration:
+ return "GET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetConfiguration:
+ return "SET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetIdle:
+ return "SET_IDLE"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+type USBTransportType uint8
+
+const (
+ USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive
+ USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream.
+ USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards.
+ USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations.
+ USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers.
+)
+
+type USBDirectionType uint8
+
+const (
+ USBDirectionTypeUnknown USBDirectionType = iota
+ USBDirectionTypeIn
+ USBDirectionTypeOut
+)
+
+func (a USBDirectionType) String() string {
+ switch a {
+ case USBDirectionTypeIn:
+ return "In"
+ case USBDirectionTypeOut:
+ return "Out"
+ default:
+ return "Unknown direction type"
+ }
+}
+
+// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol.
+type USB struct {
+ BaseLayer
+ ID uint64
+ EventType USBEventType
+ TransferType USBTransportType
+ Direction USBDirectionType
+ EndpointNumber uint8
+ DeviceAddress uint8
+ BusID uint16
+ TimestampSec int64
+ TimestampUsec int32
+ Setup bool
+ Data bool
+ Status int32
+ UrbLength uint32
+ UrbDataLength uint32
+
+ UrbInterval uint32
+ UrbStartFrame uint32
+ UrbCopyOfTransferFlags uint32
+ IsoNumDesc uint32
+}
+
+func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB }
+
+func (m *USB) NextLayerType() gopacket.LayerType {
+ if m.Setup {
+ return LayerTypeUSBRequestBlockSetup
+ } else if m.Data {
+ }
+
+ return m.TransferType.LayerType()
+}
+
+func decodeUSB(data []byte, p gopacket.PacketBuilder) error {
+ d := &USB{}
+
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.ID = binary.LittleEndian.Uint64(data[0:8])
+ m.EventType = USBEventType(data[8])
+ m.TransferType = USBTransportType(data[9])
+
+ m.EndpointNumber = data[10] & 0x7f
+ if data[10]&uint8(USBTransportTypeTransferIn) > 0 {
+ m.Direction = USBDirectionTypeIn
+ } else {
+ m.Direction = USBDirectionTypeOut
+ }
+
+ m.DeviceAddress = data[11]
+ m.BusID = binary.LittleEndian.Uint16(data[12:14])
+
+ if uint(data[14]) == 0 {
+ m.Setup = true
+ }
+
+ if uint(data[15]) == 0 {
+ m.Data = true
+ }
+
+ m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24]))
+ m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28]))
+ m.Status = int32(binary.LittleEndian.Uint32(data[28:32]))
+ m.UrbLength = binary.LittleEndian.Uint32(data[32:36])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40])
+
+ m.Contents = data[:40]
+ m.Payload = data[40:]
+
+ if m.Setup {
+ m.Payload = data[40:]
+ } else if m.Data {
+ m.Payload = data[uint32(len(data))-m.UrbDataLength:]
+ }
+
+ // if 64 bit, dissect_linux_usb_pseudo_header_ext
+ if false {
+ m.UrbInterval = binary.LittleEndian.Uint32(data[40:44])
+ m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52])
+ m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56])
+ m.Contents = data[:56]
+ m.Payload = data[56:]
+ }
+
+ // crc5 or crc16
+ // eop (end of packet)
+
+ return nil
+}
+
+type USBRequestBlockSetup struct {
+ BaseLayer
+ RequestType uint8
+ Request USBRequestBlockSetupRequest
+ Value uint16
+ Index uint16
+ Length uint16
+}
+
+func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup }
+
+func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.RequestType = data[0]
+ m.Request = USBRequestBlockSetupRequest(data[1])
+ m.Value = binary.LittleEndian.Uint16(data[2:4])
+ m.Index = binary.LittleEndian.Uint16(data[4:6])
+ m.Length = binary.LittleEndian.Uint16(data[6:8])
+ m.Contents = data[:8]
+ m.Payload = data[8:]
+ return nil
+}
+
+func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBRequestBlockSetup{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBControl struct {
+ BaseLayer
+}
+
+func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl }
+
+func (m *USBControl) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBControl{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBInterrupt struct {
+ BaseLayer
+}
+
+func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt }
+
+func (m *USBInterrupt) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBInterrupt{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBBulk struct {
+ BaseLayer
+}
+
+func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk }
+
+func (m *USBBulk) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBBulk{}
+ return decodingLayerDecoder(d, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/google/gopacket/layers/vrrp.go
new file mode 100644
index 0000000..ffaafe6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vrrp.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+/*
+ This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2.
+ https://tools.ietf.org/html/rfc3768#section-5
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Auth Type | Adver Int | Checksum |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | . |
+ | . |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (n) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (2) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+type VRRPv2Type uint8
+type VRRPv2AuthType uint8
+
+const (
+ VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement
+)
+
+// String conversions for VRRP message types
+func (v VRRPv2Type) String() string {
+ switch v {
+ case VRRPv2Advertisement:
+ return "VRRPv2 Advertisement"
+ default:
+ return ""
+ }
+}
+
+const (
+ VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication
+ VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1
+ VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2
+)
+
+func (v VRRPv2AuthType) String() string {
+ switch v {
+ case VRRPv2AuthNoAuth:
+ return "No Authentication"
+ case VRRPv2AuthReserved1:
+ return "Reserved"
+ case VRRPv2AuthReserved2:
+ return "Reserved"
+ default:
+ return ""
+ }
+}
+
+// VRRPv2 represents an VRRP v2 message.
+type VRRPv2 struct {
+ BaseLayer
+ Version uint8 // The version field specifies the VRRP protocol version of this packet (v2)
+ Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT
+ VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for
+ Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default)
+ CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement.
+ AuthType VRRPv2AuthType // identifies the authentication method being utilized
+ AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second
+ Checksum uint16 // used to detect data corruption in the VRRP message.
+ IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field.
+}
+
+// LayerType returns LayerTypeVRRP for VRRP v2 message.
+func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP }
+
+func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ v.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+ v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2
+
+ v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement)
+ if v.Type != 1 {
+ // rfc3768: A packet with unknown type MUST be discarded.
+ return errors.New("Unrecognized VRRPv2 type field.")
+ }
+
+ v.VirtualRtrID = data[1]
+ v.Priority = data[2]
+
+ v.CountIPAddr = data[3]
+ if v.CountIPAddr < 1 {
+ return errors.New("VRRPv2 number of IP addresses is not valid.")
+ }
+
+ v.AuthType = VRRPv2AuthType(data[4])
+ v.AdverInt = uint8(data[5])
+ v.Checksum = binary.BigEndian.Uint16(data[6:8])
+
+ // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field
+ // offset references the starting byte containing the list of ip addresses
+ offset := 8
+ for i := uint8(0); i < v.CountIPAddr; i++ {
+ v.IPAddress = append(v.IPAddress, data[offset:offset+4])
+ offset += 4
+ }
+
+ // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC
+ //
+ // 5.3.10. Authentication Data
+ //
+ // The authentication string is currently only used to maintain
+ // backwards compatibility with RFC 2338. It SHOULD be set to zero on
+ // transmission and ignored on reception.
+ return nil
+}
+
+// CanDecode specifies the layer type in which we are attempting to unwrap.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+ return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we set to 0
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// The VRRP packet does not include payload data. Setting byte slice to nil
+func (v *VRRPv2) Payload() []byte {
+ return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 8 {
+ return errors.New("Not a valid VRRP packet. Packet length is too small.")
+ }
+ v := &VRRPv2{}
+ return decodingLayerDecoder(v, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/google/gopacket/layers/vxlan.go
new file mode 100644
index 0000000..4f79ea4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vxlan.go
@@ -0,0 +1,98 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+// 0                   1                   2                   3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// 0       8               16              24              32
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|       Group Policy ID         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |     24 bit VXLAN Network Identifier           |   Reserved    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header. The GBP* fields come from the Group
+// Policy extension draft and are only meaningful when GBPExtension is set.
+type VXLAN struct {
+	BaseLayer
+	ValidIDFlag      bool   // 'I' bit per RFC 7348
+	VNI              uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+	GBPExtension     bool   // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	GBPDontLearn     bool   // 'D' bit per Group Policy
+	GBPApplied       bool   // 'A' bit per Group Policy
+	GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN.
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+// decodeVXLAN decodes the fixed 8-byte VXLAN header from data, registers the
+// layer with the packet builder, and continues decoding the encapsulated
+// Ethernet frame.
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+	// The VXLAN header is a fixed 8 bytes; reject anything shorter so the
+	// slice accesses below cannot panic on truncated input.
+	const vxlanLength = 8
+	if len(data) < vxlanLength {
+		return fmt.Errorf("vxlan packet too small: %d bytes", len(data))
+	}
+	vx := &VXLAN{}
+
+	// VNI is a 24bit number, Uint32 requires 32 bits
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+
+	// RFC 7348 https://tools.ietf.org/html/rfc7348
+	vx.ValidIDFlag = data[0]&0x08 > 0        // 'I' bit per RFC7348
+	vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+	// Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	vx.GBPExtension = data[0]&0x80 > 0                       // 'G' bit per the group policy draft
+	vx.GBPDontLearn = data[1]&0x40 > 0                       // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+	vx.GBPApplied = data[1]&0x80 > 0                         // 'A' bit - indicates that the group policy has already been applied to this packet.
+	vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+	// Layer information: header bytes become Contents, the rest is Payload.
+	vx.Contents = data[:vxlanLength]
+	vx.Payload = data[vxlanLength:]
+
+	p.AddLayer(vx)
+	return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+// NOTE(review): the VNI range check happens after PrependBytes, so on a
+// too-large VNI the 8 header bytes were already prepended to the buffer.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+
+	// PrependBytes does not guarantee that bytes are zeroed. Setting flags via OR requires that they start off at zero
+	bytes[0] = 0
+	bytes[1] = 0
+
+	if vx.ValidIDFlag {
+		bytes[0] |= 0x08
+	}
+	if vx.GBPExtension {
+		bytes[0] |= 0x80
+	}
+	if vx.GBPDontLearn {
+		bytes[1] |= 0x40
+	}
+	if vx.GBPApplied {
+		bytes[1] |= 0x80
+	}
+
+	// bytes[2:4] and bytes[4:8] are fully overwritten below, so they need
+	// no explicit zeroing.
+	binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+	if vx.VNI >= 1<<24 {
+		return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+	}
+	// The 24-bit VNI occupies the top three bytes; the low byte is reserved.
+	binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/google/gopacket/layertype.go
new file mode 100644
index 0000000..3abfee1
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layertype.go
@@ -0,0 +1,111 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// LayerType is a unique identifier for each type of layer. This enumeration
+// does not match with any externally available numbering scheme... it's solely
+// usable/useful within this library as a means for requesting layer types
+// (see Packet.Layer) and determining which types of layers have been decoded.
+//
+// New LayerTypes may be created by calling gopacket.RegisterLayerType.
+type LayerType int64
+
+// LayerTypeMetadata contains metadata associated with each LayerType.
+type LayerTypeMetadata struct {
+ // Name is the string returned by each layer type's String method.
+ Name string
+ // Decoder is the decoder to use when the layer type is passed in as a
+ // Decoder.
+ Decoder Decoder
+}
+
+type layerTypeMetadata struct {
+ inUse bool
+ LayerTypeMetadata
+}
+
+// DecodersByLayerName maps layer names to decoders for those layers.
+// This allows users to specify decoders by name to a program and have that
+// program pick the correct decoder accordingly.
+var DecodersByLayerName = map[string]Decoder{}
+
+const maxLayerType = 2000
+
+var ltMeta [maxLayerType]layerTypeMetadata
+var ltMetaMap = map[LayerType]layerTypeMetadata{}
+
+// RegisterLayerType creates a new layer type and registers it globally.
+// The number passed in must be unique, or a runtime panic will occur. Numbers
+// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be
+// used for common application-specific types, and are very fast. Any other
+// number (negative or >= 2000) may be used for uncommon application-specific
+// types, and are somewhat slower (they require a map lookup over an array
+// index). After the duplicate check, registration delegates to
+// OverrideLayerType.
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		if ltMeta[num].inUse {
+			panic("Layer type already exists")
+		}
+	} else {
+		if ltMetaMap[LayerType(num)].inUse {
+			panic("Layer type already exists")
+		}
+	}
+	return OverrideLayerType(num, meta)
+}
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking. It also records the decoder in
+// DecodersByLayerName under the metadata's Name.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		ltMeta[num] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	} else {
+		ltMetaMap[LayerType(num)] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	}
+	DecodersByLayerName[meta.Name] = meta.Decoder
+	return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type. Types in [0, maxLayerType) take the fast array path; all others fall
+// back to a map lookup. An error is returned when no decoder is registered.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+	var d Decoder
+	if 0 <= int(t) && int(t) < maxLayerType {
+		d = ltMeta[int(t)].Decoder
+	} else {
+		d = ltMetaMap[t].Decoder
+	}
+	if d != nil {
+		return d.Decode(data, c)
+	}
+	return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the name registered with this layer type. Unregistered
+// types fall back to their decimal number.
+func (t LayerType) String() (s string) {
+	if 0 <= int(t) && int(t) < maxLayerType {
+		s = ltMeta[int(t)].Name
+	} else {
+		s = ltMetaMap[t].Name
+	}
+	if s == "" {
+		s = strconv.Itoa(int(t))
+	}
+	return
+}
diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go
new file mode 100644
index 0000000..3a7c4b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/packet.go
@@ -0,0 +1,864 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "syscall"
+ "time"
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+ // Timestamp is the time the packet was captured, if that is known.
+ Timestamp time.Time
+ // CaptureLength is the total number of bytes read off of the wire.
+ CaptureLength int
+ // Length is the size of the original packet. Should always be >=
+ // CaptureLength.
+ Length int
+ // InterfaceIndex
+ InterfaceIndex int
+ // The packet source can place ancillary data of various types here.
+ // For example, the afpacket source can report the VLAN of captured
+ // packets this way.
+ AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
+type PacketMetadata struct {
+ CaptureInfo
+ // Truncated is true if packet decoding logic detects that there are fewer
+ // bytes in the packet than are detailed in various headers (for example, if
+ // the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
+ // This is also set automatically for packets captured off the wire if
+ // CaptureInfo.CaptureLength < CaptureInfo.Length.
+ Truncated bool
+}
+
+// Packet is the primary object used by gopacket. Packets are created by a
+// Decoder's Decode call. A packet is made up of a set of Data, which
+// is broken into a number of Layers as it is decoded.
+type Packet interface {
+ //// Functions for outputting the packet as a human-readable string:
+ //// ------------------------------------------------------------------
+ // String returns a human-readable string representation of the packet.
+ // It uses LayerString on each layer to output the layer.
+ String() string
+ // Dump returns a verbose human-readable string representation of the packet,
+ // including a hex dump of all layers. It uses LayerDump on each layer to
+ // output the layer.
+ Dump() string
+
+ //// Functions for accessing arbitrary packet layers:
+ //// ------------------------------------------------------------------
+ // Layers returns all layers in this packet, computing them as necessary
+ Layers() []Layer
+ // Layer returns the first layer in this packet of the given type, or nil
+ Layer(LayerType) Layer
+ // LayerClass returns the first layer in this packet of the given class,
+ // or nil.
+ LayerClass(LayerClass) Layer
+
+ //// Functions for accessing specific types of packet layers. These functions
+ //// return the first layer of each type found within the packet.
+ //// ------------------------------------------------------------------
+ // LinkLayer returns the first link layer in the packet
+ LinkLayer() LinkLayer
+ // NetworkLayer returns the first network layer in the packet
+ NetworkLayer() NetworkLayer
+ // TransportLayer returns the first transport layer in the packet
+ TransportLayer() TransportLayer
+ // ApplicationLayer returns the first application layer in the packet
+ ApplicationLayer() ApplicationLayer
+ // ErrorLayer is particularly useful, since it returns nil if the packet
+ // was fully decoded successfully, and non-nil if an error was encountered
+ // in decoding and the packet was only partially decoded. Thus, its output
+ // can be used to determine if the entire packet was able to be decoded.
+ ErrorLayer() ErrorLayer
+
+ //// Functions for accessing data specific to the packet:
+ //// ------------------------------------------------------------------
+ // Data returns the set of bytes that make up this entire packet.
+ Data() []byte
+ // Metadata returns packet metadata associated with this packet.
+ Metadata() *PacketMetadata
+}
+
+// packet contains all the information we need to fulfill the Packet interface,
+// and its two "subclasses" (yes, no such thing in Go, bear with me),
+// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
+// various functions needed to access this information.
+type packet struct {
+ // data contains the entire packet data for a packet
+ data []byte
+ // initialLayers is space for an initial set of layers already created inside
+ // the packet.
+ initialLayers [6]Layer
+ // layers contains each layer we've already decoded
+ layers []Layer
+ // last is the last layer added to the packet
+ last Layer
+ // metadata is the PacketMetadata for this packet
+ metadata PacketMetadata
+
+ decodeOptions DecodeOptions
+
+ // Pointers to the various important layers
+ link LinkLayer
+ network NetworkLayer
+ transport TransportLayer
+ application ApplicationLayer
+ failure ErrorLayer
+}
+
+func (p *packet) SetTruncated() {
+ p.metadata.Truncated = true
+}
+
+func (p *packet) SetLinkLayer(l LinkLayer) {
+ if p.link == nil {
+ p.link = l
+ }
+}
+
+func (p *packet) SetNetworkLayer(l NetworkLayer) {
+ if p.network == nil {
+ p.network = l
+ }
+}
+
+func (p *packet) SetTransportLayer(l TransportLayer) {
+ if p.transport == nil {
+ p.transport = l
+ }
+}
+
+func (p *packet) SetApplicationLayer(l ApplicationLayer) {
+ if p.application == nil {
+ p.application = l
+ }
+}
+
+func (p *packet) SetErrorLayer(l ErrorLayer) {
+ if p.failure == nil {
+ p.failure = l
+ }
+}
+
+func (p *packet) AddLayer(l Layer) {
+ p.layers = append(p.layers, l)
+ p.last = l
+}
+
+func (p *packet) DumpPacketData() {
+ fmt.Fprint(os.Stderr, p.packetDump())
+ os.Stderr.Sync()
+}
+
+func (p *packet) Metadata() *PacketMetadata {
+ return &p.metadata
+}
+
+func (p *packet) Data() []byte {
+ return p.data
+}
+
+func (p *packet) DecodeOptions() *DecodeOptions {
+ return &p.decodeOptions
+}
+
+func (p *packet) addFinalDecodeError(err error, stack []byte) {
+ fail := &DecodeFailure{err: err, stack: stack}
+ if p.last == nil {
+ fail.data = p.data
+ } else {
+ fail.data = p.last.LayerPayload()
+ }
+ p.AddLayer(fail)
+ p.SetErrorLayer(fail)
+}
+
+func (p *packet) recoverDecodeError() {
+ if !p.decodeOptions.SkipDecodeRecovery {
+ if r := recover(); r != nil {
+ p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
+ }
+ }
+}
+
+// LayerString outputs an individual layer as a string. The layer is output
+// in a single line, with no trailing newline. This function is specifically
+// designed to do the right thing for most layers... it follows the following
+// rules:
+//  * If the Layer has a String function, just output that.
+//  * Otherwise, output all exported fields in the layer, recursing into
+//    exported slices and structs.
+// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported
+// and unexported fields... many times packet layers contain unexported stuff
+// that would just mess up the output of the layer, see for example the
+// Payload layer and its internal 'data' field, which contains a large byte
+// array that would really mess up formatting.
+func LayerString(l Layer) string {
+	return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
+}
+
+// Dumper dumps verbose information on a value. If a layer type implements
+// Dumper, then its LayerDump() string will include the results in its output.
+type Dumper interface {
+ Dump() string
+}
+
+// LayerDump outputs a very verbose string representation of a layer. Its
+// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
+// It contains newlines and ends with a newline.
+func LayerDump(l Layer) string {
+ var b bytes.Buffer
+ b.WriteString(LayerString(l))
+ b.WriteByte('\n')
+ if d, ok := l.(Dumper); ok {
+ dump := d.Dump()
+ if dump != "" {
+ b.WriteString(dump)
+ if dump[len(dump)-1] != '\n' {
+ b.WriteByte('\n')
+ }
+ }
+ }
+ b.WriteString(hex.Dump(l.LayerContents()))
+ return b.String()
+}
+
+// layerString outputs, recursively, a layer in a "smart" way. See docs for
+// LayerString for more details.
+//
+// Params:
+// i - value to write out
+// anonymous: if we're currently recursing an anonymous member of a struct
+// writeSpace: if we've already written a value in a struct, and need to
+// write a space before writing more. This happens when we write various
+// anonymous values, and need to keep writing more.
+func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
+ // Let String() functions take precedence.
+ if v.CanInterface() {
+ if s, ok := v.Interface().(fmt.Stringer); ok {
+ return s.String()
+ }
+ }
+ // Reflect, and spit out all the exported fields as key=value.
+ switch v.Type().Kind() {
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return "nil"
+ }
+ r := v.Elem()
+ return layerString(r, anonymous, writeSpace)
+ case reflect.Struct:
+ var b bytes.Buffer
+ typ := v.Type()
+ if !anonymous {
+ b.WriteByte('{')
+ }
+ for i := 0; i < v.NumField(); i++ {
+ // Check if this is upper-case.
+ ftype := typ.Field(i)
+ f := v.Field(i)
+ if ftype.Anonymous {
+ anonStr := layerString(f, true, writeSpace)
+ writeSpace = writeSpace || anonStr != ""
+ b.WriteString(anonStr)
+ } else if ftype.PkgPath == "" { // exported
+ if writeSpace {
+ b.WriteByte(' ')
+ }
+ writeSpace = true
+ fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
+ }
+ }
+ if !anonymous {
+ b.WriteByte('}')
+ }
+ return b.String()
+ case reflect.Slice:
+ var b bytes.Buffer
+ b.WriteByte('[')
+ if v.Len() > 4 {
+ fmt.Fprintf(&b, "..%d..", v.Len())
+ } else {
+ for j := 0; j < v.Len(); j++ {
+ if j != 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(layerString(v.Index(j), false, false))
+ }
+ }
+ b.WriteByte(']')
+ return b.String()
+ }
+ return fmt.Sprintf("%v", v.Interface())
+}
+
+const (
+ longBytesLength = 128
+)
+
+// LongBytesGoString returns a string representation of the byte slice shortened
+// using the format '<type>{<truncated slice> ... (<n> bytes)}' if it
+// exceeds a predetermined length. Can be used to avoid filling the display with
+// very long byte strings.
+func LongBytesGoString(buf []byte) string {
+	if len(buf) < longBytesLength {
+		return fmt.Sprintf("%#v", buf)
+	}
+	// Keep the first longBytesLength-1 bytes, strip the closing brace, and
+	// append an ellipsis plus the full original length.
+	s := fmt.Sprintf("%#v", buf[:longBytesLength-1])
+	s = strings.TrimSuffix(s, "}")
+	return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf))
+}
+
+func baseLayerString(value reflect.Value) string {
+ t := value.Type()
+ content := value.Field(0)
+ c := make([]byte, content.Len())
+ for i := range c {
+ c[i] = byte(content.Index(i).Uint())
+ }
+ payload := value.Field(1)
+ p := make([]byte, payload.Len())
+ for i := range p {
+ p[i] = byte(payload.Index(i).Uint())
+ }
+ return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
+ LongBytesGoString(c),
+ LongBytesGoString(p))
+}
+
+func layerGoString(i interface{}, b *bytes.Buffer) {
+ if s, ok := i.(fmt.GoStringer); ok {
+ b.WriteString(s.GoString())
+ return
+ }
+
+ var v reflect.Value
+ var ok bool
+ if v, ok = i.(reflect.Value); !ok {
+ v = reflect.ValueOf(i)
+ }
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if v.Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ }
+ layerGoString(v.Elem().Interface(), b)
+ case reflect.Struct:
+ t := v.Type()
+ b.WriteString(t.String())
+ b.WriteByte('{')
+ for i := 0; i < v.NumField(); i++ {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ if t.Field(i).Name == "BaseLayer" {
+ fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
+ } else if v.Field(i).Kind() == reflect.Struct {
+ fmt.Fprintf(b, "%s:", t.Field(i).Name)
+ layerGoString(v.Field(i), b)
+ } else if v.Field(i).Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ layerGoString(v.Field(i), b)
+ } else {
+ fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
+ }
+ }
+ b.WriteByte('}')
+ default:
+ fmt.Fprintf(b, "%#v", i)
+ }
+}
+
+// LayerGoString returns a representation of the layer in Go syntax,
+// taking care to shorten "very long" BaseLayer byte slices
+func LayerGoString(l Layer) string {
+ b := new(bytes.Buffer)
+ layerGoString(l, b)
+ return b.String()
+}
+
+func (p *packet) packetString() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data()))
+ if p.metadata.Truncated {
+ b.WriteString(", truncated")
+ }
+ if p.metadata.Length > 0 {
+ fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
+ }
+ if !p.metadata.Timestamp.IsZero() {
+ fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp)
+ }
+ b.WriteByte('\n')
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l))
+ }
+ return b.String()
+}
+
+func (p *packet) packetDump() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l))
+ }
+ return b.String()
+}
+
+// eagerPacket is a packet implementation that does eager decoding. Upon
+// initial construction, it decodes all the layers it can from packet data.
+// eagerPacket implements Packet and PacketBuilder.
+type eagerPacket struct {
+ packet
+}
+
+var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")
+
+func (p *eagerPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ if p.last == nil {
+ return errors.New("NextDecoder called, but no layers added yet")
+ }
+ d := p.last.LayerPayload()
+ if len(d) == 0 {
+ return nil
+ }
+ // Since we're eager, immediately call the next decoder.
+ return next.Decode(d, p)
+}
+func (p *eagerPacket) initialDecode(dec Decoder) {
+ defer p.recoverDecodeError()
+ err := dec.Decode(p.data, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *eagerPacket) LinkLayer() LinkLayer {
+ return p.link
+}
+func (p *eagerPacket) NetworkLayer() NetworkLayer {
+ return p.network
+}
+func (p *eagerPacket) TransportLayer() TransportLayer {
+ return p.transport
+}
+func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
+ return p.application
+}
+func (p *eagerPacket) ErrorLayer() ErrorLayer {
+ return p.failure
+}
+func (p *eagerPacket) Layers() []Layer {
+ return p.layers
+}
+func (p *eagerPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) String() string { return p.packetString() }
+func (p *eagerPacket) Dump() string { return p.packetDump() }
+
+// lazyPacket does lazy decoding on its packet data. On construction it does
+// no initial decoding. For each function call, it decodes only as many layers
+// as are necessary to compute the return value for that function.
+// lazyPacket implements Packet and PacketBuilder.
+type lazyPacket struct {
+ packet
+ next Decoder
+}
+
+func (p *lazyPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ p.next = next
+ return nil
+}
+func (p *lazyPacket) decodeNextLayer() {
+ if p.next == nil {
+ return
+ }
+ d := p.data
+ if p.last != nil {
+ d = p.last.LayerPayload()
+ }
+ next := p.next
+ p.next = nil
+ // We've just set p.next to nil, so if we see we have no data, this should be
+ // the final call we get to decodeNextLayer if we return here.
+ if len(d) == 0 {
+ return
+ }
+ defer p.recoverDecodeError()
+ err := next.Decode(d, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *lazyPacket) LinkLayer() LinkLayer {
+ for p.link == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.link
+}
+func (p *lazyPacket) NetworkLayer() NetworkLayer {
+ for p.network == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.network
+}
+func (p *lazyPacket) TransportLayer() TransportLayer {
+ for p.transport == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.transport
+}
+func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
+ for p.application == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.application
+}
+func (p *lazyPacket) ErrorLayer() ErrorLayer {
+ for p.failure == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.failure
+}
+func (p *lazyPacket) Layers() []Layer {
+ for p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.layers
+}
+func (p *lazyPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
+func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() }
+
+// DecodeOptions tells gopacket how to decode a packet.
+type DecodeOptions struct {
+ // Lazy decoding decodes the minimum number of layers needed to return data
+ // for a packet at each function call. Be careful using this with concurrent
+ // packet processors, as each call to packet.* could mutate the packet, and
+ // two concurrent function calls could interact poorly.
+ Lazy bool
+ // NoCopy decoding doesn't copy its input buffer into storage that's owned by
+ // the packet. If you can guarantee that the bytes underlying the slice
+ // passed into NewPacket aren't going to be modified, this can be faster. If
+ // there's any chance that those bytes WILL be changed, this will invalidate
+ // your packets.
+ NoCopy bool
+ // SkipDecodeRecovery skips over panic recovery during packet decoding.
+ // Normally, when packets decode, if a panic occurs, that panic is captured
+ // by a recover(), and a DecodeFailure layer is added to the packet detailing
+ // the issue. If this flag is set, panics are instead allowed to continue up
+ // the stack.
+ SkipDecodeRecovery bool
+ // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
+ // decoder. If true, we should try to decode layers after TCP in single packets.
+ // This is disabled by default because the reassembly package drives the decoding
+ // of TCP payload data after reassembly.
+ DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets. It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified. Both of these take time,
+// though, so beware. If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding. If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes. The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet {
+	if !options.NoCopy {
+		// Snapshot the input so later mutation of the caller's slice cannot
+		// corrupt the packet.
+		dataCopy := make([]byte, len(data))
+		copy(dataCopy, data)
+		data = dataCopy
+	}
+	if options.Lazy {
+		p := &lazyPacket{
+			packet: packet{data: data, decodeOptions: options},
+			next:   firstLayerDecoder,
+		}
+		p.layers = p.initialLayers[:0]
+		// Crazy craziness:
+		// If the following return statement is REMOVED, and Lazy is FALSE, then
+		// eager packet processing becomes 17% FASTER. No, there is no logical
+		// explanation for this. However, it's such a hacky micro-optimization that
+		// we really can't rely on it. It appears to have to do with the size the
+		// compiler guesses for this function's stack space, since one symptom is
+		// that with the return statement in place, we more than double calls to
+		// runtime.morestack/runtime.lessstack. We'll hope the compiler gets better
+		// over time and we get this optimization for free. Until then, we'll have
+		// to live with slower packet processing.
+		return p
+	}
+	// Eager path: decode every layer immediately.
+	p := &eagerPacket{
+		packet: packet{data: data, decodeOptions: options},
+	}
+	p.layers = p.initialLayers[:0]
+	p.initialDecode(firstLayerDecoder)
+	return p
+}
+
+// PacketDataSource is an interface for some source of packet data. Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+ // ReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
+// of internal PacketDataSources, each of which will stop with io.EOF after
+// reading a finite number of packets. The returned PacketDataSource will
+// return all packets from the first finite source, followed by all packets from
+// the second, etc. Once all finite sources have returned io.EOF, the returned
+// source will as well.
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
+ c := concat(pds)
+ return &c
+}
+
+// concat chains several finite PacketDataSources; sources are consumed in
+// order, each being dropped from the front of the slice once it returns io.EOF.
+type concat []PacketDataSource
+
+// ReadPacketData returns the next packet from the first remaining source,
+// advancing to the next source on io.EOF. Once every source is exhausted it
+// returns io.EOF itself.
+func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
+	for len(*c) > 0 {
+		data, ci, err = (*c)[0].ReadPacketData()
+		if err == io.EOF {
+			*c = (*c)[1:]
+			continue
+		}
+		return
+	}
+	return nil, CaptureInfo{}, io.EOF
+}
+
+// ZeroCopyPacketDataSource is an interface to pull packet data from sources
+// that allow data to be returned without copying to a user-controlled buffer.
+// It's very similar to PacketDataSource, except that the caller must be more
+// careful in how the returned buffer is handled.
+type ZeroCopyPacketDataSource interface {
+ // ZeroCopyReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet. Unlike with
+ // PacketDataSource's ReadPacketData, the slice returned here points
+ // to a buffer owned by the data source. In particular, the bytes in
+ // this buffer may be changed by future calls to
+ // ZeroCopyReadPacketData. Do not use the returned buffer after
+ // subsequent ZeroCopyReadPacketData calls.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// PacketSource reads in packets from a PacketDataSource, decodes them, and
+// returns them.
+//
+// There are currently two different methods for reading packets in through
+// a PacketSource:
+//
+// Reading With Packets Function
+//
+// This method is the most convenient and easiest to code, but lacks
+// flexibility. Packets returns a 'chan Packet', then asynchronously writes
+// packets into that channel. Packets uses a blocking channel, and closes
+// it if an io.EOF is returned by the underlying PacketDataSource. All other
+// PacketDataSource errors are ignored and discarded.
+// for packet := range packetSource.Packets() {
+// ...
+// }
+//
+// Reading With NextPacket Function
+//
+// This method is the most flexible, and exposes errors that may be
+// encountered by the underlying PacketDataSource. It's also the fastest
+// in a tight loop, since it doesn't have the overhead of a channel
+// read/write. However, it requires the user to handle errors, most
+// importantly the io.EOF error in cases where packets are being read from
+// a file.
+// for {
+// packet, err := packetSource.NextPacket()
+// if err == io.EOF {
+// break
+// } else if err != nil {
+// log.Println("Error:", err)
+// continue
+// }
+// handlePacket(packet) // Do something with each packet.
+// }
+type PacketSource struct {
+ source PacketDataSource
+ decoder Decoder
+ // DecodeOptions is the set of options to use for decoding each piece
+ // of packet data. This can/should be changed by the user to reflect the
+ // way packets should be decoded.
+ DecodeOptions
+ c chan Packet
+}
+
+// NewPacketSource creates a packet data source.
+func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource {
+ return &PacketSource{
+ source: source,
+ decoder: decoder,
+ }
+}
+
+// NextPacket returns the next decoded packet from the PacketSource. On error,
+// it returns a nil packet and a non-nil error. The capture metadata is
+// attached to the packet, and Truncated is additionally set whenever the
+// captured length is shorter than the on-wire length.
+func (p *PacketSource) NextPacket() (Packet, error) {
+	data, ci, err := p.source.ReadPacketData()
+	if err != nil {
+		return nil, err
+	}
+	packet := NewPacket(data, p.decoder, p.DecodeOptions)
+	m := packet.Metadata()
+	m.CaptureInfo = ci
+	m.Truncated = m.Truncated || ci.CaptureLength < ci.Length
+	return packet, nil
+}
+
+// packetsToChannel reads in all packets from the packet source and sends them
+// to the given channel. This routine terminates when a non-temporary error
+// is returned by NextPacket(). The channel is closed on exit so receivers
+// can range over it.
+func (p *PacketSource) packetsToChannel() {
+	defer close(p.c)
+	for {
+		packet, err := p.NextPacket()
+		if err == nil {
+			p.c <- packet
+			continue
+		}
+
+		// Immediately retry for temporary network errors
+		if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
+			continue
+		}
+
+		// Immediately retry for EAGAIN
+		if err == syscall.EAGAIN {
+			continue
+		}
+
+		// Immediately break for known unrecoverable errors
+		if err == io.EOF || err == io.ErrUnexpectedEOF ||
+			err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer ||
+			err == syscall.EBADF ||
+			strings.Contains(err.Error(), "use of closed file") {
+			break
+		}
+
+		// Any other error: sleep briefly and retry indefinitely.
+		time.Sleep(time.Millisecond * time.Duration(5))
+	}
+}
+
+// Packets returns a channel of packets, allowing easy iterating over
+// packets. Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel. If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+//
+//  for packet := range packetSource.Packets() {
+//    handlePacket(packet)  // Do something with each packet.
+//  }
+//
+// If called more than once, returns the same channel.
+// NOTE(review): the lazy channel initialization is not synchronized —
+// confirm callers do not invoke Packets concurrently from multiple goroutines.
+func (p *PacketSource) Packets() chan Packet {
+	if p.c == nil {
+		p.c = make(chan Packet, 1000)
+		go p.packetsToChannel()
+	}
+	return p.c
+}
diff --git a/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/google/gopacket/parser.go
new file mode 100644
index 0000000..e5dc0e4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/parser.go
@@ -0,0 +1,207 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of DecodingLayer is that they decode themselves in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in. A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
+type DecodingLayer interface {
+ // DecodeFromBytes resets the internal state of this layer to the state
+ // defined by the passed-in bytes. Slices in the DecodingLayer may
+ // reference the passed-in data, so care should be taken to copy it
+ // first should later modification of data be required before the
+ // DecodingLayer is discarded.
+ DecodeFromBytes(data []byte, df DecodeFeedback) error
+ // CanDecode returns the set of LayerTypes this DecodingLayer can
+ // decode. For Layers that are also DecodingLayers, this will most
+ // often be that Layer's LayerType().
+ CanDecode() LayerClass
+ // NextLayerType returns the LayerType which should be used to decode
+ // the LayerPayload.
+ NextLayerType() LayerType
+ // LayerPayload is the set of bytes remaining to decode after a call to
+ // DecodeFromBytes.
+ LayerPayload() []byte
+}
+
+// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+ // DecodingLayerParserOptions is the set of options available to the
+ // user to define the parser's behavior.
+ DecodingLayerParserOptions
+ first LayerType
+ decoders map[LayerType]DecodingLayer
+ df DecodeFeedback
+ // Truncated is set when a decode layer detects that the packet has been
+ // truncated.
+ Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser. This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+ for _, typ := range d.CanDecode().LayerTypes() {
+ l.decoders[typ] = d
+ }
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser. Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+ l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder. Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+ dlp := &DecodingLayerParser{
+ decoders: make(map[LayerType]DecodingLayer),
+ first: first,
+ }
+ dlp.df = dlp // Cast this once to the interface
+ for _, d := range decoders {
+ dlp.AddDecodingLayer(d)
+ }
+ return dlp
+}
+
+// DecodeLayers decodes as many layers as possible from the given data. It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated. This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+// func main() {
+// var eth layers.Ethernet
+// var ip4 layers.IPv4
+// var ip6 layers.IPv6
+// var tcp layers.TCP
+// var udp layers.UDP
+// var payload gopacket.Payload
+// parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp, &udp, &payload)
+// var source gopacket.PacketDataSource = getMyDataSource()
+// decodedLayers := make([]gopacket.LayerType, 0, 10)
+// for {
+// data, _, err := source.ReadPacketData()
+// if err != nil {
+// fmt.Println("Error reading packet data: ", err)
+// continue
+// }
+// fmt.Println("Decoding packet")
+// err = parser.DecodeLayers(data, &decodedLayers)
+// for _, typ := range decodedLayers {
+// fmt.Println(" Successfully decoded layer type", typ)
+// switch typ {
+// case layers.LayerTypeEthernet:
+// fmt.Println(" Eth ", eth.SrcMAC, eth.DstMAC)
+// case layers.LayerTypeIPv4:
+// fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
+// case layers.LayerTypeIPv6:
+// fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
+// case layers.LayerTypeTCP:
+// fmt.Println(" TCP ", tcp.SrcPort, tcp.DstPort)
+// case layers.LayerTypeUDP:
+// fmt.Println(" UDP ", udp.SrcPort, udp.DstPort)
+// }
+// }
+// if parser.Truncated {
+// fmt.Println(" Packet has been truncated")
+// }
+// if err != nil {
+// fmt.Println(" Error encountered:", err)
+// }
+// }
+// }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+ l.Truncated = false
+ if !l.IgnorePanic {
+ defer panicToError(&err)
+ }
+ typ := l.first
+ *decoded = (*decoded)[:0] // Truncated decoded layers.
+ for len(data) > 0 {
+ decoder, ok := l.decoders[typ]
+ if !ok {
+ if l.IgnoreUnsupported {
+ return nil
+ }
+ return UnsupportedLayerType(typ)
+ } else if err = decoder.DecodeFromBytes(data, l.df); err != nil {
+ return err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ data = decoder.LayerPayload()
+ }
+ return nil
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+ return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+ if r := recover(); r != nil {
+ *e = fmt.Errorf("panic: %v", r)
+ }
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+ // IgnorePanic determines whether a DecodingLayerParser should stop
+ // panics on its own (by returning them as an error from DecodeLayers)
+ // or should allow them to raise up the stack. Handling errors does add
+ // latency to the process of decoding layers, but is much safer for
+ // callers. IgnorePanic defaults to false, thus if the caller does
+ // nothing decode panics will be returned as errors.
+ IgnorePanic bool
+ // IgnoreUnsupported will stop parsing and return a nil error when it
+ // encounters a layer it doesn't have a parser for, instead of returning an
+ // UnsupportedLayerType error. If this is true, it's up to the caller to make
+ // sure that all expected layers have been parsed (by checking the decoded
+ // slice).
+ IgnoreUnsupported bool
+}
diff --git a/vendor/github.com/google/gopacket/time.go b/vendor/github.com/google/gopacket/time.go
new file mode 100644
index 0000000..6d116cd
--- /dev/null
+++ b/vendor/github.com/google/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+ Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+ return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+ if t.Base == 0 {
+ return 0
+ }
+ if t.Exponent == 0 {
+ return time.Second
+ }
+ switch t.Base {
+ case 10:
+ return time.Duration(math.Pow10(t.Exponent + 9))
+ case 2:
+ if t.Exponent < 0 {
+ return time.Second >> uint(-t.Exponent)
+ }
+ return time.Second << uint(t.Exponent)
+ default:
+ // this might lose precision
+ return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+ }
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have TimestampResolutionNanosecond due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+ // Resolution returns the timestamp resolution of acquired timestamps before scaling to TimestampResolutionNanosecond.
+ Resolution() TimestampResolution
+}
diff --git a/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/google/gopacket/writer.go
new file mode 100644
index 0000000..5d303dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/writer.go
@@ -0,0 +1,232 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using the LayerWriter object.
+type SerializableLayer interface {
+ // SerializeTo writes this layer to a slice, growing that slice if necessary
+ // to make it fit the layer's data.
+ // Args:
+ // b: SerializeBuffer to write this layer on to. When called, b.Bytes()
+ // is the payload this layer should wrap, if any. Note that this
+ // layer can either prepend itself (common), append itself
+ // (uncommon), or both (sometimes padding or footers are required at
+ // the end of packet data). It's also possible (though probably very
+ // rarely needed) to overwrite any bytes in the current payload.
+ // After this call, b.Bytes() should return the byte encoding of
+ // this layer wrapping the original b.Bytes() payload.
+ // opts: options to use while writing out data.
+ // Returns:
+ // error if a problem was encountered during encoding. If an error is
+ // returned, the bytes in data should be considered invalidated, and
+ // not used.
+ //
+ // SerializeTo calls SHOULD entirely ignore LayerContents and
+ // LayerPayload. It just serializes based on struct fields, neither
+ // modifying nor using contents/payload.
+ SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+ // LayerType returns the type of the layer that is being serialized to the buffer
+ LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+ // FixLengths determines whether, during serialization, layers should fix
+ // the values for any length field that depends on the payload.
+ FixLengths bool
+ // ComputeChecksums determines whether, during serialization, layers
+ // should recompute checksums based on their payloads.
+ ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes. This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear. Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+// 1) Reusing a write buffer is generally much faster than creating a new one,
+// and with the default implementation it avoids additional memory allocations.
+// 2) If a byte slice from a previous Bytes() call will continue to be used,
+// it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer. IE: if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
+type SerializeBuffer interface {
+ // Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+ // calls. The slice returned by Bytes will be modified by future Clear calls,
+ // so if you're planning on clearing this SerializeBuffer, you may want to copy
+ // Bytes somewhere safe first.
+ Bytes() []byte
+ // PrependBytes returns a set of bytes which prepends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call PrependBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ PrependBytes(num int) ([]byte, error)
+ // AppendBytes returns a set of bytes which appends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call AppendBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ AppendBytes(num int) ([]byte, error)
+ // Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear,
+ // the byte slice returned by any previous call to Bytes() for this buffer
+ // should be considered invalidated.
+ Clear() error
+ // Layers returns all the Layers that have been successfully serialized into this buffer
+ // already.
+ Layers() []LayerType
+ // PushLayer adds the current Layer to the list of Layers that have been serialized
+ // into this buffer.
+ PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+ data []byte
+ start int
+ prepended, appended int
+ layers []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+ return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended. This tends to decrease the
+// number of memory allocations made by the buffer during writes.
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+ return &serializeBuffer{
+ data: make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+ start: expectedPrependLength,
+ prepended: expectedPrependLength,
+ appended: expectedAppendLength,
+ }
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+ return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ if w.start < num {
+ toPrepend := w.prepended
+ if toPrepend < num {
+ toPrepend = num
+ }
+ w.prepended += toPrepend
+ length := cap(w.data) + toPrepend
+ newData := make([]byte, length)
+ newStart := w.start + toPrepend
+ copy(newData[newStart:], w.data[w.start:])
+ w.start = newStart
+ w.data = newData[:toPrepend+len(w.data)]
+ }
+ w.start -= num
+ return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ initialLength := len(w.data)
+ if cap(w.data)-initialLength < num {
+ toAppend := w.appended
+ if toAppend < num {
+ toAppend = num
+ }
+ w.appended += toAppend
+ newData := make([]byte, cap(w.data)+toAppend)
+ copy(newData[w.start:], w.data[w.start:])
+ w.data = newData[:initialLength]
+ }
+ // Grow the buffer. We know it'll be under capacity given above.
+ w.data = w.data[:initialLength+num]
+ return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+ w.start = w.prepended
+ w.data = w.data[:w.start]
+ w.layers = w.layers[:0]
+ return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+ return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+ w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other. Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+// buf := gopacket.NewSerializeBuffer()
+// opts := gopacket.SerializeOptions{}
+// gopacket.SerializeLayers(buf, opts, a, b, c)
+// firstPayload := buf.Bytes() // contains byte representation of a(b(c))
+// gopacket.SerializeLayers(buf, opts, d, e, f)
+// secondPayload := buf.Bytes() // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+ w.Clear()
+ for i := len(layers) - 1; i >= 0; i-- {
+ layer := layers[i]
+ err := layer.SerializeTo(w, opts)
+ if err != nil {
+ return err
+ }
+ w.PushLayer(layer.LayerType())
+ }
+ return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+ sls := []SerializableLayer{}
+ for _, layer := range packet.Layers() {
+ sl, ok := layer.(SerializableLayer)
+ if !ok {
+ return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+ }
+ sls = append(sls, sl)
+ }
+ return SerializeLayers(buf, opts, sls...)
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable to output text differences in a human friendly way, there
+// are no guarantees generated diffs are consumable by patch(1).
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s, _ := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s, _ := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s, _ := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
	// Cached: computed at most once per matcher.
	if m.matchingBlocks != nil {
		return m.matchingBlocks
	}

	// Divide and conquer: find the longest match in the region, then
	// recurse on the unmatched regions to its left and to its right.
	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
		match := m.findLongestMatch(alo, ahi, blo, bhi)
		i, j, k := match.A, match.B, match.Size
		if match.Size > 0 {
			if alo < i && blo < j {
				matched = matchBlocks(alo, i, blo, j, matched)
			}
			matched = append(matched, match)
			if i+k < ahi && j+k < bhi {
				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
			}
		}
		return matched
	}
	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)

	// It's possible that we have adjacent equal blocks in the
	// matching_blocks list now. Collapse each run of adjacent blocks into a
	// single block so the adjacency guarantee documented above holds.
	nonAdjacent := []Match{}
	i1, j1, k1 := 0, 0, 0
	for _, b := range matched {
		// Is this block adjacent to i1, j1, k1?
		i2, j2, k2 := b.A, b.B, b.Size
		if i1+k1 == i2 && j1+k1 == j2 {
			// Yes, so collapse them -- this just increases the length of
			// the first block by the length of the second, and the first
			// block so lengthened remains the block to compare against.
			k1 += k2
		} else {
			// Not adjacent. Remember the first block (k1==0 means it's
			// the dummy we started with), and make the second block the
			// new block to compare against.
			if k1 > 0 {
				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
			}
			i1, j1, k1 = i2, j2, k2
		}
	}
	if k1 > 0 {
		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
	}

	// Terminating sentinel: the only triple with Size == 0.
	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
	m.matchingBlocks = nonAdjacent
	return m.matchingBlocks
}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
+
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
	// A negative n selects the conventional default of three context lines.
	if n < 0 {
		n = 3
	}
	codes := m.GetOpCodes()
	if len(codes) == 0 {
		codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
	}
	// Fixup leading and trailing groups if they show no changes:
	// trim an 'e' run at either end down to at most n context lines.
	if codes[0].Tag == 'e' {
		c := codes[0]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
	}
	if codes[len(codes)-1].Tag == 'e' {
		c := codes[len(codes)-1]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
	}
	nn := n + n
	groups := [][]OpCode{}
	group := []OpCode{}
	for _, c := range codes {
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		// End the current group and start a new one whenever
		// there is a large range with no changes: an equal run longer than
		// 2n lines contributes n trailing context lines to the current
		// group and n leading context lines to the next.
		if c.Tag == 'e' && i2-i1 > nn {
			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
				j1, min(j2, j1+n)})
			groups = append(groups, group)
			group = []OpCode{}
			i1, j1 = max(i1, i2-n), max(j1, j2-n)
		}
		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
	}
	// Drop a trailing group that consists of nothing but context.
	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
		groups = append(groups, group)
	}
	return groups
}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRation() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
// formatRangeUnified converts a half-open line range to the "ed" style
// "start,length" notation used in unified-diff hunk headers.
func formatRangeUnified(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	length := stop - start
	beginning := start + 1 // lines are numbered from one
	if length == 1 {
		// A single-line range is written as just the line number.
		return fmt.Sprintf("%d", beginning)
	}
	if length == 0 {
		beginning-- // empty ranges begin at the line just before the range
	}
	return fmt.Sprintf("%d,%d", beginning, length)
}
+
// Unified diff parameters.
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name, used in the "---" header line
	FromDate string   // First file time, appended to the header after a tab
	B        []string // Second sequence lines
	ToFile   string   // Second file name, used in the "+++" header line
	ToDate   string   // Second file time, appended to the header after a tab
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines (a negative value selects the default of 3)
}
+
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
	// All output goes through a buffered writer that is flushed on return.
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	// wf writes a formatted string, ws a plain string; both surface the
	// underlying write error so the caller can stop early.
	wf := func(format string, args ...interface{}) error {
		_, err := buf.WriteString(fmt.Sprintf(format, args...))
		return err
	}
	ws := func(s string) error {
		_, err := buf.WriteString(s)
		return err
	}

	if len(diff.Eol) == 0 {
		diff.Eol = "\n"
	}

	started := false
	m := NewMatcher(diff.A, diff.B)
	for _, g := range m.GetGroupedOpCodes(diff.Context) {
		// The "---"/"+++" file header is written lazily, once, before the
		// first hunk -- and only if a file name was supplied.
		if !started {
			started = true
			fromDate := ""
			if len(diff.FromDate) > 0 {
				fromDate = "\t" + diff.FromDate
			}
			toDate := ""
			if len(diff.ToDate) > 0 {
				toDate = "\t" + diff.ToDate
			}
			if diff.FromFile != "" || diff.ToFile != "" {
				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
				if err != nil {
					return err
				}
				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
				if err != nil {
					return err
				}
			}
		}
		// Hunk header "@@ -range1 +range2 @@" covering the whole group.
		first, last := g[0], g[len(g)-1]
		range1 := formatRangeUnified(first.I1, last.I2)
		range2 := formatRangeUnified(first.J1, last.J2)
		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
			return err
		}
		for _, c := range g {
			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
			// Context lines are prefixed with a space ...
			if c.Tag == 'e' {
				for _, line := range diff.A[i1:i2] {
					if err := ws(" " + line); err != nil {
						return err
					}
				}
				continue
			}
			// ... removed lines (replace/delete) with '-' ...
			if c.Tag == 'r' || c.Tag == 'd' {
				for _, line := range diff.A[i1:i2] {
					if err := ws("-" + line); err != nil {
						return err
					}
				}
			}
			// ... and inserted lines (replace/insert) with '+'.
			if c.Tag == 'r' || c.Tag == 'i' {
				for _, line := range diff.B[j1:j2] {
					if err := ws("+" + line); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
+
+// Like WriteUnifiedDiff but returns the diff a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
// formatRangeContext converts a half-open line range to the "ed" style
// "first,last" notation used in context-diff section headers.
func formatRangeContext(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	length := stop - start
	beginning := start + 1 // lines are numbered from one
	if length == 0 {
		beginning-- // empty ranges begin at the line just before the range
	}
	if length > 1 {
		// Multi-line ranges are written as "first,last" (both inclusive).
		return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
	}
	return fmt.Sprintf("%d", beginning)
}
+
+type ContextDiff UnifiedDiff
+
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
	// Buffered writer, flushed on return. Unlike WriteUnifiedDiff the write
	// helpers below do not return errors; the first failure is latched in
	// diffErr and reported once at the end.
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	var diffErr error
	wf := func(format string, args ...interface{}) {
		_, err := buf.WriteString(fmt.Sprintf(format, args...))
		if diffErr == nil && err != nil {
			diffErr = err
		}
	}
	ws := func(s string) {
		_, err := buf.WriteString(s)
		if diffErr == nil && err != nil {
			diffErr = err
		}
	}

	if len(diff.Eol) == 0 {
		diff.Eol = "\n"
	}

	// Per-opcode line prefixes used in the body of each hunk.
	prefix := map[byte]string{
		'i': "+ ",
		'd': "- ",
		'r': "! ",
		'e': "  ",
	}

	started := false
	m := NewMatcher(diff.A, diff.B)
	for _, g := range m.GetGroupedOpCodes(diff.Context) {
		// The "***"/"---" file header is written lazily, once, before the
		// first hunk -- and only if a file name was supplied.
		if !started {
			started = true
			fromDate := ""
			if len(diff.FromDate) > 0 {
				fromDate = "\t" + diff.FromDate
			}
			toDate := ""
			if len(diff.ToDate) > 0 {
				toDate = "\t" + diff.ToDate
			}
			if diff.FromFile != "" || diff.ToFile != "" {
				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
			}
		}

		first, last := g[0], g[len(g)-1]
		ws("***************" + diff.Eol)

		// "From" half of the hunk: the lines of A are listed only when the
		// group contains a replace or delete opcode.
		range1 := formatRangeContext(first.I1, last.I2)
		wf("*** %s ****%s", range1, diff.Eol)
		for _, c := range g {
			if c.Tag == 'r' || c.Tag == 'd' {
				for _, cc := range g {
					if cc.Tag == 'i' {
						continue
					}
					for _, line := range diff.A[cc.I1:cc.I2] {
						ws(prefix[cc.Tag] + line)
					}
				}
				break
			}
		}

		// "To" half of the hunk: the lines of B are listed only when the
		// group contains a replace or insert opcode.
		range2 := formatRangeContext(first.J1, last.J2)
		wf("--- %s ----%s", range2, diff.Eol)
		for _, c := range g {
			if c.Tag == 'r' || c.Tag == 'i' {
				for _, cc := range g {
					if cc.Tag == 'd' {
						continue
					}
					for _, line := range diff.B[cc.J1:cc.J2] {
						ws(prefix[cc.Tag] + line)
					}
				}
				break
			}
		}
	}
	return diffErr
}
+
+// Like WriteContextDiff but returns the diff a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
// SplitLines splits a string on "\n" while preserving the separators. The
// output can be used as input for the UnifiedDiff and ContextDiff
// structures. The final element always ends in "\n", even when the input
// did not.
func SplitLines(s string) []string {
	lines := strings.SplitAfter(s, "\n")
	last := len(lines) - 1
	lines[last] = lines[last] + "\n"
	return lines
}
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 0000000..f38ec59
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..bf89ecd
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,622 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertable to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Eventuallyf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails test
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object is implemented by the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) contains not all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..d2bb0b8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 0000000..75ecdca
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,1232 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertable to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertable to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Errorf(a.t, err, msg, args...)
+}
+
+// Eventually asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Eventuallyf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails test
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails test
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExistsf(a.t, path, msg, args...)
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greaterf(a.t, e1, e2, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object is implemented by the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object is implemented by the specified interface.
+//
+// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+//    a.Lessf(1, 2, "error message %s", "formatted")
+//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+//    a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoErrorf(a.t, err, msg, args...)
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+//    a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//    a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) contains not all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) contains not all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//    a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//    a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 0000000..188bb9e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
new file mode 100644
index 0000000..15a486c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -0,0 +1,309 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// compare orders obj1 and obj2, which must both be values of the given
+// reflect.Kind, and reports whether that kind is orderable at all.
+//
+// NOTE: the result uses an inverted convention relative to the usual
+// three-way compare: -1 means obj1 > obj2, 0 means obj1 == obj2, and
+// 1 means obj1 < obj2.  The callers below (Greater, GreaterOrEqual,
+// Less, LessOrEqual) are written against this convention.
+//
+// For kinds this package cannot order (structs, slices, ...) — and for
+// unordered float pairs involving NaN — it returns (0, false).
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
+	v1 := reflect.ValueOf(obj1)
+	v2 := reflect.ValueOf(obj2)
+
+	// Extracting through reflect.Value (rather than type-asserting the
+	// built-in type) also handles user-defined types such as
+	// "type myInt int", whose Kind matches but whose concrete type does
+	// not; the previous concrete-type assertions panicked on those.
+	switch kind {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		i1, i2 := v1.Int(), v2.Int()
+		return orderOf(i1 > i2, i1 == i2), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		u1, u2 := v1.Uint(), v2.Uint()
+		return orderOf(u1 > u2, u1 == u2), true
+	case reflect.Float32, reflect.Float64:
+		f1, f2 := v1.Float(), v2.Float()
+		switch {
+		case f1 > f2:
+			return -1, true
+		case f1 == f2:
+			return 0, true
+		case f1 < f2:
+			return 1, true
+		}
+		// None of >, ==, < holds: at least one operand is NaN, so the
+		// pair is unordered — report "not comparable", matching the
+		// original fall-through behavior.
+		return 0, false
+	case reflect.String:
+		s1, s2 := v1.String(), v2.String()
+		return orderOf(s1 > s2, s1 == s2), true
+	}
+
+	return 0, false
+}
+
+// orderOf translates a (greater, equal) pair into this package's inverted
+// ordering code: -1 for greater, 0 for equal, 1 for less.  Only valid for
+// totally ordered operands (ints, uints, strings); floats must handle NaN
+// separately before using it.
+func orderOf(greater, equal bool) int {
+	switch {
+	case greater:
+		return -1
+	case equal:
+		return 0
+	default:
+		return 1
+	}
+}
+
+// Greater asserts that the first element is greater than the second
+//
+//    assert.Greater(t, 2, 1)
+//    assert.Greater(t, float64(2), float64(1))
+//    assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	// Ordering is only defined between values of the same kind.
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	// compare uses an inverted convention: -1 means e1 > e2.
+	if res != -1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+//    assert.GreaterOrEqual(t, 2, 1)
+//    assert.GreaterOrEqual(t, 2, 2)
+//    assert.GreaterOrEqual(t, "b", "a")
+//    assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	// Ordering is only defined between values of the same kind.
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	// compare uses an inverted convention: -1 means e1 > e2, 0 means equal.
+	if res != -1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Less asserts that the first element is less than the second
+//
+//    assert.Less(t, 1, 2)
+//    assert.Less(t, float64(1), float64(2))
+//    assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	// Ordering is only defined between values of the same kind.
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	// compare uses an inverted convention: 1 means e1 < e2.
+	if res != 1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+//    assert.LessOrEqual(t, 1, 2)
+//    assert.LessOrEqual(t, 2, 2)
+//    assert.LessOrEqual(t, "a", "b")
+//    assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	// Ordering is only defined between values of the same kind.
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	// compare uses an inverted convention: 1 means e1 < e2, 0 means equal.
+	if res != 1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..bdd8138
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1626 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+ yaml "gopkg.in/yaml.v2"
+)
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+
+ exp, ok := expected.([]byte)
+ if !ok {
+ return reflect.DeepEqual(expected, actual)
+ }
+
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ }
+ if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+ if ObjectsAreEqual(expected, actual) {
+ return true
+ }
+
+ actualType := reflect.TypeOf(actual)
+ if actualType == nil {
+ return false
+ }
+ expectedValue := reflect.ValueOf(expected)
+ if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+ }
+
+ return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+ pc := uintptr(0)
+ file := ""
+ line := 0
+ ok := false
+ name := ""
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ // The breaks below failed to terminate the loop, and we ran off the
+ // end of the call stack.
+ break
+ }
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "<autogenerated>" {
+ break
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 || msgAndArgs == nil {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ msg := msgAndArgs[0]
+ if msgAsStr, ok := msg.(string); ok {
+ return msgAsStr
+ }
+ return fmt.Sprintf("%+v", msg)
+ }
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
+ if i != 0 {
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fallback
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+ {"Error", failureMessage},
+ }
+
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if len(message) > 0 {
+ content = append(content, labeledContent{"Messages", message})
+ }
+
+ t.Errorf("\n%s", ""+labeledOutput(content...))
+
+ return false
+}
+
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
+
+// Implements asserts that an object is implemented by the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not same: \n"+
+ "expected: %p %#v\n"+
+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%#v)", expected, expected),
+ fmt.Sprintf("%T(%#v)", actual, actual)
+ }
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertable to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !isNil(object) {
+ return true
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// containsKind checks if a specified kind in the slice of kinds.
+func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
+ for i := 0; i < len(kinds); i++ {
+ if kind == kinds[i] {
+ return true
+ }
+ }
+
+ return false
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ isNilableKind := containsKind(
+ []reflect.Kind{
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice},
+ kind)
+
+ if isNilableKind && value.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isNil(object) {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ // get nil case out of the way
+ if object == nil {
+ return true
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
+ case reflect.Ptr:
+ if objValue.IsNil() {
+ return true
+ }
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
+ }
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ pass := isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ pass := !isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen try to get length of object.
+// return (false, 0) if impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() not accept.
+//
+// assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if h, ok := t.(interface {
+ Helper()
+ }); ok {
+ h.Helper()
+ }
+
+ if value != true {
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if value != false {
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// containsElement try loop over the list check if the list includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ listKind := reflect.TypeOf(list).Kind()
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if listKind == reflect.String {
+ elementValue := reflect.ValueOf(element)
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if listKind == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) contains not all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ aKind := reflect.TypeOf(listA).Kind()
+ bKind := reflect.TypeOf(listB).Kind()
+
+ if aKind != reflect.Array && aKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ }
+
+ if bKind != reflect.Array && bKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ }
+
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ if aLen != bLen {
+ return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+ }
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}, string) {
+
+ didPanic := false
+ var message interface{}
+ var stack string
+ func() {
+
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ stack = string(debug.Stack())
+ }
+ }()
+
+ // call the target function
+ f()
+
+ }()
+
+ return didPanic, message, stack
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = float64(xn)
+ case time.Duration:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ if !aok {
+ return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ bf, bok := toFloat(actual)
+ if !bok {
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err != nil {
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if err == nil {
+ return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+ }
+
+ return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
+}
+
+// matchRegexp return true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ match := matchRegexp(rx, str)
+
+ if match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return !match
+
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return true
+ }
+ return true
+ }
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+ if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
+
+ if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+ return ""
+ }
+
+ var e, a string
+ if et != reflect.TypeOf("") {
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
+ } else {
+ e = reflect.ValueOf(expected).String()
+ a = reflect.ValueOf(actual).String()
+ }
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return "\n\nDiff:\n" + diff
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
+
+type tHelper interface {
+ Helper()
+}
+
+// Eventually asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return true
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
+ }
+ }
+}
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..c9dccc4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// if you assert many times, use the format below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..df189d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..df46fa7
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,143 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return -1, err
+ }
+ req.URL.RawQuery = values.Encode()
+ handler(w, req)
+ return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
+}
+
+// HTTPBody is a helper that returns HTTP body of the response. It returns
+// empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 0000000..055480b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..b50c6e8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..1f7e87e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+//// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..129bc2a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,815 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ // Walk through pointers, allocating as needed, until a non-pointer
+ // value is reached or an Unmarshaler implementation is found.
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+// Thresholds for the alias-expansion budget used by allowedAliasRatio
+// (protection against "billion laughs"-style documents).
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+// allowedAliasRatio returns the maximum fraction of decode operations that
+// may come from alias expansion for a document of the given size.
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+// unmarshal decodes node n into out, dispatching on the node kind. It also
+// tracks decode/alias counts and aborts when alias expansion exceeds the
+// budget computed by allowedAliasRatio.
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+// document decodes a document node by unmarshalling its single child,
+// remembering the document root in d.doc for later reference.
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+// alias decodes an alias node by following it to the anchored node,
+// using d.aliases to detect (and fail on) self-referential anchors and
+// d.aliasDepth to attribute decode operations to alias expansion.
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+// zeroValue is the zero reflect.Value; setting a map index to it deletes
+// the entry.
+var zeroValue reflect.Value
+
+// resetMap deletes every entry from out (used when a nil scalar targets a
+// non-addressable map that cannot be replaced wholesale).
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+// scalar decodes a scalar node into out. The node's value is first resolved
+// (tag inference, base64 decoding of !!binary), then converted to the
+// output kind, trying in order: exact type match, encoding.TextUnmarshaler,
+// and per-kind conversions with overflow checks. On failure a type error is
+// recorded via terror and false is returned.
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ // Strings are accepted for integer outputs only when the
+ // target is time.Duration and the string parses as one.
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+// settableValueOf returns an addressable, settable reflect.Value holding a
+// copy of i (reflect.ValueOf alone yields an unsettable Value).
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+// sequence decodes a sequence node into a slice, fixed-length array, or
+// interface (as []interface{}). Children that fail to decode are skipped,
+// and slices are truncated to the number of successfully decoded elements.
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+// mapping decodes a mapping node into a struct, map, MapSlice, or
+// interface, dispatching to mappingStruct/mappingSlice as appropriate.
+// Merge keys ("<<") are expanded via merge, and map/slice-typed keys are
+// rejected since they are not valid Go map keys.
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ // Nested generic values inherit the fully-generic map type; restore the
+ // previous one on exit.
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+// setMapIndex stores v under key k in out. In strict mode a key that is
+// already present is reported as a duplicate-key error instead of being
+// overwritten.
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
+// mappingSlice decodes a mapping node into a []MapItem slice, preserving
+// the document's key order. Items whose key or value fails to decode are
+// skipped.
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+// mappingStruct decodes a mapping node into a struct, matching keys against
+// the cached field info from getStructInfo. Inline fields and an inline map
+// (",inline" tag) are honored; strict mode reports duplicate and unknown
+// fields as type errors.
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ // Unknown key but the struct has an inline map: store it there.
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+// failWantMap aborts decoding with the canonical merge-key type error.
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+// merge applies a YAML merge key ("<<") value n into out. The value must be
+// a mapping, an alias to a mapping, or a sequence of those; anything else
+// aborts via failWantMap.
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ if n.alias != nil && n.alias.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ if ni.alias != nil && ni.alias.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+// isMerge reports whether n is a "<<" merge key node.
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..a1c2cc5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+// The buffer is treated as full when fewer than 5 spare bytes remain — the
+// same margin assumed by put, put_break, and write below.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+// Flushes first when fewer than 5 free bytes remain; returns false only if
+// that flush fails. Advances the column counter.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+// Emits CR, LF, or CRLF according to emitter.line_break, resets the column
+// counter to 0, and advances the line counter.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+// Copies one UTF-8 encoded character (1-4 bytes, per width) starting at
+// s[*i], advances *i past it, and counts it as a single column.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+// Copies s character by character via write; returns false on the first
+// failed flush.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+// A '\n' is normalized to the emitter's configured break style via
+// put_break; any other break character is copied verbatim while still
+// resetting the column and advancing the line counter.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+// Always returning false lets callers write
+// `return yaml_emitter_set_emitter_error(...)` directly.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+// The event is queued; queued events are analyzed and fed to the state
+// machine only once enough lookahead is buffered (see
+// yaml_emitter_need_more_events).
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+// Returns false (i.e. emission may proceed) as soon as either enough extra
+// events are queued or a complete subtree (nesting level back to 0) is
+// already buffered.
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+// A handle already on the stack is either silently accepted
+// (allow_duplicates) or reported as a duplicate %TAG error.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+// The previous indent is pushed onto emitter.indents. At top level (indent
+// < 0) flow context starts at best_indent while block context starts at 0;
+// otherwise the indent grows by best_indent unless indentless is set.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+// Routes the event to the handler for the emitter's current state; an
+// unrecognized state falls through the empty default case to the panic
+// below.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+// Clamps the emitter's tunables to sane defaults (UTF-8 encoding, indent
+// 2-9, width 80 or unlimited, LF breaks), resets position counters, and
+// writes a BOM for non-UTF-8 encodings.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+// For DOCUMENT-START: validates and registers any %YAML/%TAG directives,
+// writes them out, and emits the "---" marker unless the document start can
+// remain implicit. For STREAM-END: closes any open-ended document with
+// "...", flushes, and moves to the end state.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ // The default !/!! handles are always available, duplicates allowed.
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ // Close the previous document with "..." when directives follow it.
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+// Pushes DOCUMENT-END as the follow-up state and emits the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+// Writes an explicit "..." terminator when the event is not implicit,
+// flushes the buffer, and clears the per-document tag directives.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+// On the first item it opens "[" and increases flow depth/indent; on
+// SEQUENCE-END it closes "]" and pops indent and state; otherwise it writes
+// the "," separator (and a line break in canonical or over-width output)
+// before emitting the item.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+// On the first key it opens "{" and increases flow depth/indent; on
+// MAPPING-END it closes "}" and pops indent and state. Simple keys are
+// emitted directly; complex keys get an explicit "?" indicator.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+// simple selects the compact "key: value" form; otherwise the ":" follows
+// an explicit "?" key (with a break in canonical or over-width output).
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+// The first item increases the indent (indentless when nested directly in a
+// mapping value); SEQUENCE-END pops indent and state; each item is prefixed
+// with the "-" indicator on its own indent.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+// The first key increases the indent; MAPPING-END pops indent and state.
+// Simple keys are emitted inline, complex keys get an explicit "?"
+// indicator.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+// simple selects the inline "key: value" form; otherwise the ":" goes on a
+// fresh indented line following an explicit "?" key.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
// yaml_emitter_write_indent moves output to the start of a fresh line (if
// not already at a suitable position) and pads with spaces up to the
// current indentation level.
func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
	indent := emitter.indent
	if indent < 0 {
		// No indentation set yet (e.g. at document level).
		indent = 0
	}
	// Break the line unless we are already freshly indented at or before
	// the target column (and the last output was whitespace).
	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
		if !put_break(emitter) {
			return false
		}
	}
	for emitter.column < indent {
		if !put(emitter, ' ') {
			return false
		}
	}
	emitter.whitespace = true
	emitter.indention = true
	return true
}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
// yaml_emitter_write_tag_content writes a tag suffix, passing through URI
// characters and alphanumerics verbatim and percent-encoding every other
// byte as %XX.
func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
	if need_whitespace && !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}
	for i := 0; i < len(value); {
		var must_write bool
		switch value[i] {
		// Characters allowed verbatim in a tag URI.
		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
			must_write = true
		default:
			must_write = is_alpha(value, i)
		}
		if must_write {
			if !write(emitter, value, &i) {
				return false
			}
		} else {
			// Percent-encode each byte of the (possibly multi-byte) rune.
			w := width(value[i])
			for k := 0; k < w; k++ {
				octet := value[i]
				i++
				if !put(emitter, '%') {
					return false
				}

				// High nibble as an uppercase hex digit.
				c := octet >> 4
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}

				// Low nibble as an uppercase hex digit.
				c = octet & 0x0f
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}
			}
		}
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}
+
// yaml_emitter_write_plain_scalar writes an unquoted scalar, folding long
// lines at spaces (when allow_breaks permits) and preserving explicit line
// breaks from the value.
func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
	if !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}

	spaces := false
	breaks := false
	for i := 0; i < len(value); {
		if is_space(value, i) {
			// Fold at a single space once past the preferred width.
			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break(value, i) {
			// A lone '\n' needs doubling so folding round-trips it.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}

	emitter.whitespace = false
	emitter.indention = false
	if emitter.root_context {
		// A plain scalar at document root leaves the document open-ended.
		emitter.open_ended = true
	}

	return true
}
+
// yaml_emitter_write_single_quoted_scalar writes the value surrounded by
// single quotes, doubling embedded quotes ('') and optionally folding long
// lines at interior spaces.
func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {

	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
		return false
	}

	spaces := false
	breaks := false
	for i := 0; i < len(value); {
		if is_space(value, i) {
			// Fold only at an interior single space past the best width.
			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break(value, i) {
			// A lone '\n' is doubled so folding preserves it.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			// Escape a single quote by doubling it.
			if value[i] == '\'' {
				if !put(emitter, '\'') {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}
+
// yaml_emitter_write_double_quoted_scalar writes the value surrounded by
// double quotes, using YAML escape sequences for non-printable characters,
// line breaks, quotes, backslashes and (when unicode output is disabled)
// all non-ASCII runes. Long lines may be folded at interior spaces.
func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
	spaces := false
	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
		return false
	}

	for i := 0; i < len(value); {
		// Characters that must be written as an escape sequence.
		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
			is_bom(value, i) || is_break(value, i) ||
			value[i] == '"' || value[i] == '\\' {

			octet := value[i]

			// Decode one UTF-8 rune by hand: w = byte width, v = code point.
			var w int
			var v rune
			switch {
			case octet&0x80 == 0x00:
				w, v = 1, rune(octet&0x7F)
			case octet&0xE0 == 0xC0:
				w, v = 2, rune(octet&0x1F)
			case octet&0xF0 == 0xE0:
				w, v = 3, rune(octet&0x0F)
			case octet&0xF8 == 0xF0:
				w, v = 4, rune(octet&0x07)
			}
			for k := 1; k < w; k++ {
				octet = value[i+k]
				v = (v << 6) + (rune(octet) & 0x3F)
			}
			i += w

			if !put(emitter, '\\') {
				return false
			}

			var ok bool
			switch v {
			// Named escapes defined by the YAML spec.
			case 0x00:
				ok = put(emitter, '0')
			case 0x07:
				ok = put(emitter, 'a')
			case 0x08:
				ok = put(emitter, 'b')
			case 0x09:
				ok = put(emitter, 't')
			case 0x0A:
				ok = put(emitter, 'n')
			case 0x0b:
				ok = put(emitter, 'v')
			case 0x0c:
				ok = put(emitter, 'f')
			case 0x0d:
				ok = put(emitter, 'r')
			case 0x1b:
				ok = put(emitter, 'e')
			case 0x22:
				ok = put(emitter, '"')
			case 0x5c:
				ok = put(emitter, '\\')
			case 0x85:
				ok = put(emitter, 'N')
			case 0xA0:
				ok = put(emitter, '_')
			case 0x2028:
				ok = put(emitter, 'L')
			case 0x2029:
				ok = put(emitter, 'P')
			default:
				// Numeric escape: \xXX, \uXXXX or \UXXXXXXXX depending on
				// magnitude; w is reused here as the hex digit count.
				if v <= 0xFF {
					ok = put(emitter, 'x')
					w = 2
				} else if v <= 0xFFFF {
					ok = put(emitter, 'u')
					w = 4
				} else {
					ok = put(emitter, 'U')
					w = 8
				}
				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
					digit := byte((v >> uint(k)) & 0x0F)
					if digit < 10 {
						ok = put(emitter, digit+'0')
					} else {
						ok = put(emitter, digit+'A'-10)
					}
				}
			}
			if !ok {
				return false
			}
			spaces = false
		} else if is_space(value, i) {
			// Fold at an interior space past the preferred width; if the
			// next character is also a space, escape the break with '\'.
			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				if is_space(value, i+1) {
					if !put(emitter, '\\') {
						return false
					}
				}
				i += width(value[i])
			} else if !write(emitter, value, &i) {
				return false
			}
			spaces = true
		} else {
			if !write(emitter, value, &i) {
				return false
			}
			spaces = false
		}
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
		return false
	}
	emitter.whitespace = false
	emitter.indention = false
	return true
}
+
// yaml_emitter_write_block_scalar_hints writes the indentation and
// chomping hints that follow a '|' or '>' indicator: an explicit indent
// digit when the content starts with whitespace, '-' (strip) when the
// content lacks a final break, and '+' (keep) when it ends in multiple
// breaks.
func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
	if is_space(value, 0) || is_break(value, 0) {
		// Leading whitespace forces an explicit indentation indicator.
		indent_hint := []byte{'0' + byte(emitter.best_indent)}
		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
			return false
		}
	}

	emitter.open_ended = false

	var chomp_hint [1]byte
	if len(value) == 0 {
		chomp_hint[0] = '-'
	} else {
		// Walk back over UTF-8 continuation bytes to the last rune.
		i := len(value) - 1
		for value[i]&0xC0 == 0x80 {
			i--
		}
		if !is_break(value, i) {
			// No trailing break: strip chomping.
			chomp_hint[0] = '-'
		} else if i == 0 {
			// Value is a single break: keep chomping.
			chomp_hint[0] = '+'
			emitter.open_ended = true
		} else {
			// Check the rune before the final break.
			i--
			for value[i]&0xC0 == 0x80 {
				i--
			}
			if is_break(value, i) {
				// Two or more trailing breaks: keep chomping.
				chomp_hint[0] = '+'
				emitter.open_ended = true
			}
		}
	}
	if chomp_hint[0] != 0 {
		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
			return false
		}
	}
	return true
}
+
// yaml_emitter_write_literal_scalar writes the value in literal block
// style ('|'): content lines are reproduced verbatim, each indented to the
// current level.
func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
		return false
	}
	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
		return false
	}
	// The header line ends here; content starts on the next line.
	if !put_break(emitter) {
		return false
	}
	emitter.indention = true
	emitter.whitespace = true
	breaks := true
	for i := 0; i < len(value); {
		if is_break(value, i) {
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				// First character after a break: re-indent the new line.
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			breaks = false
		}
	}

	return true
}
+
// yaml_emitter_write_folded_scalar writes the value in folded block style
// ('>'): long lines may be folded at spaces, and an extra break is written
// before a '\n' that must survive folding.
func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
		return false
	}
	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
		return false
	}

	// Header line ends; content starts on the next line.
	if !put_break(emitter) {
		return false
	}
	emitter.indention = true
	emitter.whitespace = true

	breaks := true
	leading_spaces := true
	for i := 0; i < len(value); {
		if is_break(value, i) {
			if !breaks && !leading_spaces && value[i] == '\n' {
				// NOTE(review): k starts at 0 here, scanning breaks from the
				// beginning of the value rather than from i as libyaml's C
				// code does with its string_copy — confirm against upstream
				// go-yaml before relying on this branch.
				k := 0
				for is_break(value, k) {
					k += width(value[k])
				}
				if !is_blankz(value, k) {
					if !put_break(emitter) {
						return false
					}
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				// Remember whether this folded line starts with a blank.
				leading_spaces = is_blank(value, i)
			}
			// Fold at a single interior space once past the best width.
			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			emitter.indention = false
			breaks = false
		}
	}
	return true
}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..0ee738e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
// jsonNumber is the interface of the encoding/json.Number datatype.
// Repeating the interface here avoids a dependency on encoding/json, and also
// supports other libraries like jsoniter, which use a similar datatype with
// the same interface. Detecting this interface is useful when dealing with
// structures containing json.Number, which is a string under the hood. The
// encoder should prefer the use of Int64(), Float64() and string(), in that
// order, when encoding this type.
type jsonNumber interface {
	Float64() (float64, error) // value as a float64, or an error if unrepresentable
	Int64() (int64, error)     // value as an int64, or an error if unrepresentable
	String() string            // the raw textual form of the number
}
+
// encoder drives the low-level emitter: Go values are translated into
// libyaml-style events, which the emitter renders into out (or a writer).
type encoder struct {
	emitter yaml_emitter_t // low-level event emitter state
	event   yaml_event_t   // scratch event, reused for each emit call
	out     []byte         // accumulated output when targeting a string
	flow    bool           // request flow style for the next collection
	// doneInit holds whether the initial stream_start_event has been
	// emitted.
	doneInit bool
}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
// finish terminates the YAML stream by emitting the stream-end event.
func (e *encoder) finish() {
	// Clear open_ended so no spurious "..." terminator is written.
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}
+
// destroy releases the emitter's internal resources.
func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}
+
// emit feeds the current scratch event to the emitter, panicking (via
// must/failf) on failure.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
// marshalDoc wraps the marshaling of a single value in document-start and
// document-end events (both implicit, so no "---"/"..." markers unless
// required).
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
	e.init()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()
	e.marshal(tag, in)
	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
}
+
// marshal emits the events for a single Go value. Special interfaces
// (jsonNumber, Marshaler, TextMarshaler) are honored first, then the value
// is dispatched on its reflect.Kind. Invalid and nil values become a null
// node.
func (e *encoder) marshal(tag string, in reflect.Value) {
	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
		e.nilv()
		return
	}
	iface := in.Interface()
	switch m := iface.(type) {
	case jsonNumber:
		integer, err := m.Int64()
		if err == nil {
			// In this case the json.Number is a valid int64
			in = reflect.ValueOf(integer)
			break
		}
		float, err := m.Float64()
		if err == nil {
			// In this case the json.Number is a valid float64
			in = reflect.ValueOf(float)
			break
		}
		// fallback case - no number could be obtained
		in = reflect.ValueOf(m.String())
	case time.Time, *time.Time:
		// Although time.Time implements TextMarshaler,
		// we don't want to treat it as a string for YAML
		// purposes because YAML has special support for
		// timestamps.
	case Marshaler:
		v, err := m.MarshalYAML()
		if err != nil {
			fail(err)
		}
		if v == nil {
			e.nilv()
			return
		}
		in = reflect.ValueOf(v)
	case encoding.TextMarshaler:
		text, err := m.MarshalText()
		if err != nil {
			fail(err)
		}
		in = reflect.ValueOf(string(text))
	case nil:
		e.nilv()
		return
	}
	switch in.Kind() {
	case reflect.Interface:
		// Unwrap the interface and marshal the dynamic value.
		e.marshal(tag, in.Elem())
	case reflect.Map:
		e.mapv(tag, in)
	case reflect.Ptr:
		if in.Type() == ptrTimeType {
			e.timev(tag, in.Elem())
		} else {
			e.marshal(tag, in.Elem())
		}
	case reflect.Struct:
		if in.Type() == timeType {
			e.timev(tag, in)
		} else {
			e.structv(tag, in)
		}
	case reflect.Slice, reflect.Array:
		// []MapItem preserves insertion order and encodes as a mapping.
		if in.Type().Elem() == mapItemType {
			e.itemsv(tag, in)
		} else {
			e.slicev(tag, in)
		}
	case reflect.String:
		e.stringv(tag, in)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// time.Duration renders as its string form ("1h2m"), not nanoseconds.
		if in.Type() == durationType {
			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
		} else {
			e.intv(tag, in)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.uintv(tag, in)
	case reflect.Float32, reflect.Float64:
		e.floatv(tag, in)
	case reflect.Bool:
		e.boolv(tag, in)
	default:
		panic("cannot marshal type: " + in.Type().String())
	}
}
+
// mapv encodes a Go map as a YAML mapping, emitting keys in the sorted
// order defined by keyList for deterministic output.
func (e *encoder) mapv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		keys := keyList(in.MapKeys())
		sort.Sort(keys)
		for _, k := range keys {
			e.marshal("", k)
			e.marshal("", in.MapIndex(k))
		}
	})
}
+
// itemsv encodes a []MapItem (or a type convertible to it) as a YAML
// mapping, preserving the slice's order instead of sorting keys.
func (e *encoder) itemsv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
		for _, item := range slice {
			e.marshal("", reflect.ValueOf(item.Key))
			e.marshal("", reflect.ValueOf(item.Value))
		}
	})
}
+
// structv encodes a struct as a YAML mapping using the cached field info
// from getStructInfo (yaml tags, inline fields, omitempty, flow).
func (e *encoder) structv(tag string, in reflect.Value) {
	sinfo, err := getStructInfo(in.Type())
	if err != nil {
		panic(err)
	}
	e.mappingv(tag, func() {
		for _, info := range sinfo.FieldsList {
			var value reflect.Value
			if info.Inline == nil {
				value = in.Field(info.Num)
			} else {
				// Inlined struct field: resolve through the index path.
				value = in.FieldByIndex(info.Inline)
			}
			if info.OmitEmpty && isZero(value) {
				continue
			}
			e.marshal("", reflect.ValueOf(info.Key))
			e.flow = info.Flow
			e.marshal("", value)
		}
		if sinfo.InlineMap >= 0 {
			// An ",inline" map contributes its entries alongside the
			// regular fields; keys must not collide with field names.
			m := in.Field(sinfo.InlineMap)
			if m.Len() > 0 {
				e.flow = false
				keys := keyList(m.MapKeys())
				sort.Sort(keys)
				for _, k := range keys {
					if _, found := sinfo.FieldsMap[k.String()]; found {
						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
					}
					e.marshal("", k)
					e.flow = false
					e.marshal("", m.MapIndex(k))
				}
			}
		}
	})
}
+
+// mappingv emits a mapping-start event, runs f to emit the key/value
+// pairs, then emits mapping-end. The one-shot e.flow flag selects flow
+// style and is consumed (reset) here. Note the mapping initializers
+// have no boolean result to check, unlike the sequence ones in slicev.
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+// slicev emits a slice or array value as a YAML sequence, marshalling
+// each element in order. The one-shot e.flow flag selects flow style
+// and is consumed here.
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path: must start with a sign or digit and contain a ':'
+ // before the (comparatively expensive) regexp is consulted.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// stringv emits a string value, choosing a scalar style that will
+// round-trip: literal style for multi-line strings, plain style when the
+// text would not be mistaken for another YAML type, and double-quoted
+// otherwise. Invalid UTF-8 is emitted base64-encoded under !!binary.
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+// intv emits a signed integer value as a base-10 plain scalar.
+func (e *encoder) intv(tag string, in reflect.Value) {
+ e.emitScalar(strconv.FormatInt(in.Int(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+// uintv emits an unsigned integer value as a base-10 plain scalar.
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ e.emitScalar(strconv.FormatUint(in.Uint(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+// timev emits a time.Time value as an RFC 3339 timestamp with
+// nanosecond precision, in plain scalar style.
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ e.emitScalar(t.Format(time.RFC3339Nano), "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+// floatv emits a float value as a plain scalar, formatting with the
+// precision of the underlying type and translating Go's infinity/NaN
+// spellings into the YAML forms (.inf, -.inf, .nan).
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+// nilv emits an explicit "null" scalar for nil values.
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+// emitScalar builds and emits a single scalar event. An empty tag marks
+// the scalar as implicit (both plain- and quoted-implicit), letting the
+// emitter omit the tag from the output.
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 0000000..1934e87
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue without consuming it.
+// Returns nil if the scanner fails to produce a token (callers must
+// treat nil as a fatal parse error).
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+// Also records whether the consumed token ended the stream so that
+// yaml_parser_parse can stop producing events.
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event. Returns false only on parse error; after the
+// stream has ended (or a prior error) it returns true with the event
+// left zeroed, which callers detect via event.typ.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error state and message. Always returns false so callers
+// can "return yaml_parser_set_parser_error(...)" directly.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// yaml_parser_set_parser_error_context records a parse error together
+// with the enclosing context (e.g. "while parsing a block mapping") and
+// its mark. Always returns false, mirroring yaml_parser_set_parser_error.
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher: maps the parser's current state to the production
+// handler that will emit the next event. The panic in the default case
+// indicates a programming bug, not bad input — every reachable state
+// must have an arm here.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+// Emits the STREAM-START event (carrying the detected encoding) and
+// advances to the implicit-document-start state.
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+// Emits DOCUMENT-START (implicit or explicit) or, at end of input,
+// the STREAM-END event.
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document: content starts without "---".
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document: directives and/or "---".
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+// An empty document body (the next token already belongs to the next
+// document or the stream end) yields an empty scalar event.
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ // Pop the saved state and emit an empty scalar for the body.
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// Emits DOCUMENT-END (explicit only when a "..." token is present) and
+// clears per-document tag directives before the next document.
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ // Tag directives are scoped to a single document.
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+// This is the core node dispatcher: it consumes an optional anchor/tag
+// property pair (in either order), resolves tag handles against the
+// document's tag directives, and then emits the event matching the
+// next token (alias, scalar, or collection start).
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ // Properties may appear as ANCHOR TAG? or TAG ANCHOR?.
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ // Resolve the tag handle against the document's tag directives.
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ // Verbatim tag: the suffix already is the full tag.
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ // Bare properties with no content: an empty scalar node.
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+// On the first call this consumes the BLOCK-SEQUENCE-START token and
+// pushes its mark for error reporting; subsequent calls emit one entry
+// (or an empty scalar for "-" with no content) until BLOCK-END.
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ // peek_token returns nil when the scanner fails; dereferencing
+ // it unchecked would crash on malformed input.
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ // "-" with no node after it: emit an empty scalar.
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+// An indentless sequence is the "-" list nested directly under a
+// mapping key at the same indentation; it has no explicit start token,
+// so the sequence ends as soon as a non-BLOCK-ENTRY token appears.
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ // "-" with no node after it: emit an empty scalar.
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+// On the first call this consumes the BLOCK-MAPPING-START token and
+// pushes its mark for error reporting; subsequent calls parse one key
+// (or an empty scalar for "?" with no key) until BLOCK-END.
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ // peek_token returns nil when the scanner fails; dereferencing
+ // it unchecked would crash on malformed input.
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ // KEY with no node after it: emit an empty scalar key.
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+// A key without a following VALUE token (or a VALUE token with no node
+// after it) produces an empty scalar value.
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+// On the first call this consumes the FLOW-SEQUENCE-START token and
+// pushes its mark; entries after the first must be separated by ','.
+// A KEY token inside the sequence starts a single-pair implicit mapping.
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ // peek_token returns nil when the scanner fails; dereferencing
+ // it unchecked would crash on malformed input.
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ // Single-pair mapping nested in the sequence: "[a: b]".
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+// Parses the key of a single-pair mapping inside a flow sequence; a
+// missing key (',' / ':' / ']' follows immediately) becomes an empty scalar.
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+// Parses the value of a single-pair mapping inside a flow sequence;
+// a missing value becomes an empty scalar.
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ // Note: this deliberately shadows the outer token; the fall-through
+ // below then reports the empty scalar at the VALUE token's mark.
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+// Emits the MAPPING-END event closing a single-pair mapping inside a
+// flow sequence, then returns to the sequence-entry state.
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+// On the first call this consumes the FLOW-MAPPING-START token and
+// pushes its mark; entries after the first must be separated by ','.
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ // peek_token returns nil when the scanner fails; dereferencing
+ // it unchecked would crash on malformed input.
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ // KEY with no node after it: emit an empty scalar key.
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ // Entry without an explicit KEY: the value is empty.
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+// When empty is true (the FLOW-MAPPING-EMPTY-VALUE state), the key had no
+// usable value, so an empty scalar is produced without consuming any token.
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			// A real value follows the ':'.
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	// ':' with no value (or no ':' at all): the value is an empty scalar.
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+//
+// Used wherever the grammar allows a node to be omitted (empty keys and
+// values); the event is a zero-width implicit plain scalar located at mark.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+// The tag handles that are always defined: "!" for local tags and "!!" for
+// the standard tag:yaml.org,2002: namespace. They are appended after any
+// explicit %TAG directives (see yaml_parser_process_directives).
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+//
+// Consumes any %YAML and %TAG directive tokens at the current position,
+// records the tag directives on the parser (rejecting duplicates), and then
+// installs the default tag handles unless they were explicitly overridden.
+// The results are returned through the optional out-parameters.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			// Only YAML 1.1 is accepted by this parser.
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			// allow_duplicates=false: an explicit duplicate %TAG is an error.
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	// Add the default handles; duplicates are silently accepted here so that
+	// an explicit %TAG for "!" or "!!" takes precedence.
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+//
+// If a directive with the same handle already exists, it either succeeds
+// silently (allow_duplicates, used for the built-in defaults) or reports a
+// parser error at mark. The directive is deep-copied before being stored.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false (so callers can `return` the result
+// directly).
+//
+// offset is the byte offset in the input stream where the problem was found;
+// value is the offending byte/rune value, or -1 when not applicable.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks recognized at the start of the stream
+// (see http://www.unicode.org/faq/utf_bom.html).
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding. A recognized BOM is consumed (it is not part
+	// of the document content), advancing both the raw buffer position and
+	// the stream offset.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		// No BOM: default to UTF-8 without consuming anything.
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer: compact the unread bytes and read more from the
+// underlying stream via read_handler.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer. Note that a read may return
+	// both data and io.EOF in the same call; the data is kept and the EOF
+	// flag is recorded for the next round.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that this is the required contract, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value
+				// (rejects overlong encodings).
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//      U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//      U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//      W1 = 110110yyyyyyyyyy
+				//      W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer (re-encoded as UTF-8).
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is Go) panicking; or C) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..4120e0c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// resolveMapItem is a resolveMap entry: the Go value a plain scalar decodes
+// to, together with its YAML tag.
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+// resolveTable maps the first byte of a plain scalar to a coarse type hint
+// ('S' sign, 'D' digit, 'M' possibly in resolveMap, '.' float); a zero entry
+// means the scalar can only be a string. Populated by init below.
+var resolveTable = make([]byte, 256)
+// resolveMap holds the plain-scalar spellings with fixed values (booleans,
+// null, NaN/infinity, the merge key), keyed by their exact text.
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	// Populate the first-byte hint table: only bytes that can begin a
+	// non-string scalar receive a hint; everything else stays 0 and is
+	// resolved as !!str immediately.
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	// Index every accepted spelling of the fixed scalars.
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+// shortTag converts a long-form tag ("tag:yaml.org,2002:str") into its short
+// "!!" form; tags without the standard prefix pass through unchanged.
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if suffix := strings.TrimPrefix(tag, longTagPrefix); suffix != tag {
+		return "!!" + suffix
+	}
+	return tag
+}
+
+// longTag is the inverse of shortTag: it expands a "!!" short tag into the
+// full "tag:yaml.org,2002:" form, leaving anything else untouched.
+func longTag(tag string) string {
+	if suffix := strings.TrimPrefix(tag, "!!"); suffix != tag {
+		return longTagPrefix + suffix
+	}
+	return tag
+}
+
+// resolvableTag reports whether resolve knows how to infer or verify values
+// for the given tag; anything else is passed through untouched by resolve.
+func resolvableTag(tag string) bool {
+	return tag == "" ||
+		tag == yaml_STR_TAG ||
+		tag == yaml_BOOL_TAG ||
+		tag == yaml_INT_TAG ||
+		tag == yaml_FLOAT_TAG ||
+		tag == yaml_NULL_TAG ||
+		tag == yaml_TIMESTAMP_TAG
+}
+
+// yamlStyleFloat matches a plain scalar written as a YAML-style float
+// literal: an optional sign, then either ".digits" or "digits[.digits]",
+// followed by an optional decimal exponent.
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+// resolve deduces the YAML tag and Go value for the plain scalar `in`,
+// honouring an explicitly requested tag when one was given. On a mismatch
+// between the requested tag and what the scalar can represent it panics via
+// failf (recovered higher up in the decoder).
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		// If the value did not resolve to the explicitly requested tag,
+		// fail — except that an int is acceptable where a float was
+		// requested (it is widened here).
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parser, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			// YAML allows '_' separators inside numbers; strip them first.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			// Binary integers ("0b...") are not handled by ParseInt's
+			// base-0 mode, so they get an explicit pass here.
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					// [Go fix] This was `if true || ...`, which always
+					// forced the conversion to int and silently truncated
+					// negative binary values beyond the platform int range
+					// on 32-bit builds. Mirror the positive 0b branch.
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
+
+// encodeBase64 encodes s as standard base64, breaking the output into
+// newline-terminated chunks of at most 70 characters once the encoding is
+// long enough to need more than one line.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	// One backing array holds both the raw encoding and the wrapped copy.
+	buf := make([]byte, encLen*2+lines)
+	enc := buf[:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(enc, []byte(s))
+	n := 0
+	for start := 0; start < len(enc); start += lineLen {
+		end := start + lineLen
+		if end > len(enc) {
+			end = len(enc)
+		}
+		n += copy(out[n:], enc[start:end])
+		if lines > 1 {
+			out[n] = '\n'
+			n++
+		}
+	}
+	return string(out[:n])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html. The layouts are tried
+// in order by parseTimestamp.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Cheap pre-filter: every supported layout starts with a four-digit
+	// year followed by '-', so anything else is rejected before paying
+	// for time.Parse.
+	digits := 0
+	for digits < len(s) {
+		if c := s[digits]; c < '0' || c > '9' {
+			break
+		}
+		digits++
+	}
+	if digits != 4 || digits == len(s) || s[digits] != '-' {
+		return time.Time{}, false
+	}
+	for _, layout := range allowedTimestampFormats {
+		if t, err := time.Parse(layout, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..0b9bb60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descendant parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in details.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included into a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
// Advance the buffer pointer past a single character.
// The mark advances by one character position (index and column), while
// buffer_pos advances by the character's UTF-8 encoded width in bytes.
func skip(parser *yaml_parser_t) {
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
}
+
// skip_line advances the buffer pointer past a line break, resetting the
// mark to column 0 of the next line. A CR LF pair counts as two characters;
// any other break recognized by is_break counts as one. If the current
// position is not a line break, nothing happens.
func skip_line(parser *yaml_parser_t) {
	if is_crlf(parser.buffer, parser.buffer_pos) {
		parser.mark.index += 2
		parser.mark.column = 0
		parser.mark.line++
		parser.unread -= 2
		parser.buffer_pos += 2
	} else if is_break(parser.buffer, parser.buffer_pos) {
		parser.mark.index++
		parser.mark.column = 0
		parser.mark.line++
		parser.unread--
		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
	}
}
+
// Copy a character to a string buffer and advance pointers.
// Appends the (possibly multi-byte) character at buffer_pos to s and
// returns the extended slice; the mark and unread counters advance by
// exactly one character.
func read(parser *yaml_parser_t, s []byte) []byte {
	w := width(parser.buffer[parser.buffer_pos])
	if w == 0 {
		panic("invalid character sequence")
	}
	if len(s) == 0 {
		// Give fresh buffers a small starting capacity to avoid repeated growth.
		s = make([]byte, 0, 32)
	}
	if w == 1 && len(s)+w <= cap(s) {
		// Fast path: single-byte character with spare capacity — extend in place.
		s = s[:len(s)+1]
		s[len(s)-1] = parser.buffer[parser.buffer_pos]
		parser.buffer_pos++
	} else {
		// Multi-byte character or full buffer: let append handle the copy/growth.
		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
		parser.buffer_pos += w
	}
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	return s
}
+
// Copy a line break character to a string buffer and advance pointers.
// All break forms are normalized to '\n' except LS/PS, which are copied
// through verbatim. Returns s unchanged if the position is not a break.
func read_line(parser *yaml_parser_t, s []byte) []byte {
	buf := parser.buffer
	pos := parser.buffer_pos
	switch {
	case buf[pos] == '\r' && buf[pos+1] == '\n':
		// CR LF . LF — two characters consumed, so index/unread are bumped
		// here in addition to the shared adjustment after the switch.
		s = append(s, '\n')
		parser.buffer_pos += 2
		parser.mark.index++
		parser.unread--
	case buf[pos] == '\r' || buf[pos] == '\n':
		// CR|LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 1
	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
		// NEL . LF (U+0085: two bytes, one character)
		s = append(s, '\n')
		parser.buffer_pos += 2
	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
		// LS|PS . LS|PS (U+2028/U+2029: three bytes, preserved as-is)
		s = append(s, buf[parser.buffer_pos:pos+3]...)
		parser.buffer_pos += 3
	default:
		// Not positioned at a line break: nothing to consume.
		return s
	}
	parser.mark.index++
	parser.mark.column = 0
	parser.mark.line++
	parser.unread--
	return s
}
+
// Get the next token.
// Pops the next token off the parser's token queue into *token, refilling
// the queue via yaml_parser_fetch_more_tokens when needed. Returns false
// only on a fetch failure; after STREAM-END (or a prior error) it returns
// true with the token left zeroed.
func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Erase the token object.
	*token = yaml_token_t{} // [Go] Is this necessary?

	// No tokens after STREAM-END or error.
	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
		return true
	}

	// Ensure that the tokens queue contains enough tokens.
	if !parser.token_available {
		if !yaml_parser_fetch_more_tokens(parser) {
			return false
		}
	}

	// Fetch the next token from the queue.
	*token = parser.tokens[parser.tokens_head]
	parser.tokens_head++
	parser.tokens_parsed++
	parser.token_available = false

	// Remember that the stream ended so later calls short-circuit above.
	if token.typ == yaml_STREAM_END_TOKEN {
		parser.stream_end_produced = true
	}
	return true
}
+
// Set the scanner error and return false.
// The false return lets failing scan paths write
// "return yaml_parser_set_scanner_error(...)" directly. The problem is
// recorded at the parser's current mark; context_mark points at where the
// enclosing construct began.
func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
	parser.error = yaml_SCANNER_ERROR
	parser.context = context
	parser.context_mark = context_mark
	parser.problem = problem
	parser.problem_mark = parser.mark
	return false
}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
// trace prints its arguments prefixed with "+++" immediately and returns a
// closure that prints the same arguments prefixed with "---", intended for
// "defer trace(...)()" style debugging.
func trace(args ...interface{}) func() {
	fmt.Println(append([]interface{}{"+++"}, args...)...)
	exit := append([]interface{}{"---"}, args...)
	return func() { fmt.Println(exit...) }
}
+
// Ensure that the tokens queue contains at least one token which can be
// returned to the Parser.
// Fetching stops once the head token can no longer be displaced by a
// pending simple key; until then more tokens must be scanned because a
// KEY token might still need to be inserted before the head.
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
	// While we need more tokens to fetch, do it.
	for {
		if parser.tokens_head != len(parser.tokens) {
			// If queue is non-empty, check if any potential simple key may
			// occupy the head position.
			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
			if !ok {
				break
			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
				// A scanner error was raised while validating the key.
				return false
			} else if !valid {
				break
			}
		}
		// Fetch the next token.
		if !yaml_parser_fetch_next_token(parser) {
			return false
		}
	}

	parser.token_available = true
	return true
}
+
// The dispatcher for token fetchers.
// Skips whitespace/comments, adjusts indentation, then inspects the next
// character(s) to pick the appropriate fetch_* routine. The ORDER of the
// checks below is significant: e.g. the '---' document-start test must run
// before the '-' block-entry test, which must run before the plain-scalar
// fallback.
func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
	// Ensure that the buffer is initialized.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	// Check if we just started scanning. Fetch STREAM-START then.
	if !parser.stream_start_produced {
		return yaml_parser_fetch_stream_start(parser)
	}

	// Eat whitespaces and comments until we reach the next token.
	if !yaml_parser_scan_to_next_token(parser) {
		return false
	}

	// Check the indentation level against the current column.
	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
		return false
	}

	// Ensure that the buffer contains at least 4 characters. 4 is the length
	// of the longest indicators ('--- ' and '... ').
	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
		return false
	}

	// Is it the end of the stream?
	if is_z(parser.buffer, parser.buffer_pos) {
		return yaml_parser_fetch_stream_end(parser)
	}

	// Is it a directive?
	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
		return yaml_parser_fetch_directive(parser)
	}

	buf := parser.buffer
	pos := parser.buffer_pos

	// Is it the document start indicator?
	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
	}

	// Is it the document end indicator?
	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
	}

	// Is it the flow sequence start indicator?
	if buf[pos] == '[' {
		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
	}

	// Is it the flow mapping start indicator?
	if parser.buffer[parser.buffer_pos] == '{' {
		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
	}

	// Is it the flow sequence end indicator?
	if parser.buffer[parser.buffer_pos] == ']' {
		return yaml_parser_fetch_flow_collection_end(parser,
			yaml_FLOW_SEQUENCE_END_TOKEN)
	}

	// Is it the flow mapping end indicator?
	if parser.buffer[parser.buffer_pos] == '}' {
		return yaml_parser_fetch_flow_collection_end(parser,
			yaml_FLOW_MAPPING_END_TOKEN)
	}

	// Is it the flow entry indicator?
	if parser.buffer[parser.buffer_pos] == ',' {
		return yaml_parser_fetch_flow_entry(parser)
	}

	// Is it the block entry indicator? ('-' only counts when followed by a blank.)
	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
		return yaml_parser_fetch_block_entry(parser)
	}

	// Is it the key indicator? (In flow context '?' needs no trailing blank.)
	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
		return yaml_parser_fetch_key(parser)
	}

	// Is it the value indicator? (In flow context ':' needs no trailing blank.)
	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
		return yaml_parser_fetch_value(parser)
	}

	// Is it an alias?
	if parser.buffer[parser.buffer_pos] == '*' {
		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
	}

	// Is it an anchor?
	if parser.buffer[parser.buffer_pos] == '&' {
		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
	}

	// Is it a tag?
	if parser.buffer[parser.buffer_pos] == '!' {
		return yaml_parser_fetch_tag(parser)
	}

	// Is it a literal scalar?
	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
		return yaml_parser_fetch_block_scalar(parser, true)
	}

	// Is it a folded scalar?
	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
		return yaml_parser_fetch_block_scalar(parser, false)
	}

	// Is it a single-quoted scalar?
	if parser.buffer[parser.buffer_pos] == '\'' {
		return yaml_parser_fetch_flow_scalar(parser, true)
	}

	// Is it a double-quoted scalar?
	if parser.buffer[parser.buffer_pos] == '"' {
		return yaml_parser_fetch_flow_scalar(parser, false)
	}

	// Is it a plain scalar?
	//
	// A plain scalar may start with any non-blank characters except
	//
	//      '-', '?', ':', ',', '[', ']', '{', '}',
	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
	//      '%', '@', '`'.
	//
	// In the block context (and, for the '-' indicator, in the flow context
	// too), it may also start with the characters
	//
	//      '-', '?', ':'
	//
	// if it is followed by a non-space character.
	//
	// The last rule is more restrictive than the specification requires.
	// [Go] Make this logic more reasonable.
	//switch parser.buffer[parser.buffer_pos] {
	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
	//}
	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
		(parser.flow_level == 0 &&
			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
		return yaml_parser_fetch_plain_scalar(parser)
	}

	// If we don't determine the token type so far, it is an error.
	return yaml_parser_set_scanner_error(parser,
		"while scanning for the next token", parser.mark,
		"found character that cannot start any token")
}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
// Check if a simple key may start at the current position and add it if
// needed.
// The saved key records the token number at which a KEY token would have
// to be inserted should a ':' later confirm the key.
func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
	// A simple key is required at the current position if the scanner is in
	// the block context and the current column coincides with the indentation
	// level.

	required := parser.flow_level == 0 && parser.indent == parser.mark.column

	//
	// If the current position may start a simple key, save it.
	//
	if parser.simple_key_allowed {
		simple_key := yaml_simple_key_t{
			possible:     true,
			required:     required,
			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
			mark:         parser.mark,
		}

		// Drop any previously pending key on this flow level first.
		if !yaml_parser_remove_simple_key(parser) {
			return false
		}
		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
		// Index the key by token number for O(1) lookup in fetch_more_tokens.
		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
	}
	return true
}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
// max_flow_level limits the flow_level
const max_flow_level = 10000

// Increase the flow level and resize the simple key list if needed.
// A fresh (impossible) simple-key slot is pushed for the new level, and
// exceeding max_flow_level raises a scanner error to guard against
// pathological, deeply nested input.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
	// Reset the simple key on the next level.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
		possible:     false,
		required:     false,
		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
		mark:         parser.mark,
	})

	// Increase the flow level.
	parser.flow_level++
	if parser.flow_level > max_flow_level {
		return yaml_parser_set_scanner_error(parser,
			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
	}
	return true
}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
// max_indents limits the indents stack size
const max_indents = 10000

// Push the current indentation level to the stack and set the new level if
// the current column is greater than the indentation level. In this case,
// append or insert the specified token into the token queue (a negative
// number appends; otherwise the token is inserted at position
// number - tokens_parsed, which lets a BLOCK-MAPPING-START be placed before
// an already-queued KEY token).
func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
	// In the flow context, do nothing.
	if parser.flow_level > 0 {
		return true
	}

	if parser.indent < column {
		// Push the current indentation level to the stack and set the new
		// indentation level.
		parser.indents = append(parser.indents, parser.indent)
		parser.indent = column
		// Guard against pathologically deep indentation.
		if len(parser.indents) > max_indents {
			return yaml_parser_set_scanner_error(parser,
				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
				fmt.Sprintf("exceeded max depth of %d", max_indents))
		}

		// Create a token and insert it into the queue.
		token := yaml_token_t{
			typ:        typ,
			start_mark: mark,
			end_mark:   mark,
		}
		if number > -1 {
			// Convert the absolute token number into a queue offset.
			number -= parser.tokens_parsed
		}
		yaml_insert_token(parser, number, &token)
	}
	return true
}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
// Initialize the scanner and produce the STREAM-START token.
// Sets up the indentation and simple-key state for a fresh stream; called
// exactly once, on the first fetch.
func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {

	// Set the initial indentation.
	parser.indent = -1

	// Initialize the simple key stack.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})

	// Token-number -> stack-index lookup used by fetch_more_tokens.
	parser.simple_keys_by_tok = make(map[int]int)

	// A simple key is allowed at the beginning of the stream.
	parser.simple_key_allowed = true

	// We have started.
	parser.stream_start_produced = true

	// Create the STREAM-START token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_START_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
		encoding:   parser.encoding,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
// Produce the STREAM-END token and shut down the scanner.
// Closes any open block collections (unrolls to indent -1) and discards
// pending simple keys before queueing the final token.
func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {

	// Force new line.
	if parser.mark.column != 0 {
		parser.mark.column = 0
		parser.mark.line++
	}

	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the STREAM-END token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_END_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
// Directives only appear at column 0, so the indentation stack is unrolled
// and any pending simple key is dropped before scanning the directive body.
func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
	token := yaml_token_t{}
	if !yaml_parser_scan_directive(parser, &token) {
		return false
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// The indicators '[' and '{' may start a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// Increase the flow level.
	if !yaml_parser_increase_flow_level(parser) {
		return false
	}

	// A simple key may follow the indicators '[' and '{'.
	parser.simple_key_allowed = true

	// Consume the single indicator character.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
// Produce the BLOCK-ENTRY token for a '-' indicator.
// In the block context this may first open a new sequence by emitting
// BLOCK-SEQUENCE-START via roll_indent.
func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
	// Check if the scanner is in the block context.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new entry.
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"block sequence entries are not allowed in this context")
		}
		// Add the BLOCK-SEQUENCE-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
			return false
		}
	} else {
		// It is an error for the '-' indicator to occur in the flow context,
		// but we let the Parser detect and report about it because the Parser
		// is able to point to the context.
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '-'.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the BLOCK-ENTRY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_BLOCK_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
// Produce the KEY token for an explicit '?' indicator.
// In the block context this may first open a new mapping by emitting
// BLOCK-MAPPING-START via roll_indent.
func yaml_parser_fetch_key(parser *yaml_parser_t) bool {

	// In the block context, additional checks are required.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new key (not necessarily simple).
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"mapping keys are not allowed in this context")
		}
		// Add the BLOCK-MAPPING-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
			return false
		}
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '?' in the block context.
	parser.simple_key_allowed = parser.flow_level == 0

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the KEY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_KEY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
// Produce the VALUE token for a ':' indicator.
// If a valid simple key is pending, a KEY token is retroactively inserted
// into the token queue at the position recorded when the key was saved;
// otherwise the ':' follows an explicit ('?') complex key.
func yaml_parser_fetch_value(parser *yaml_parser_t) bool {

	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]

	// Have we found a simple key?
	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
		return false

	} else if valid {

		// Create the KEY token and insert it into the queue.
		token := yaml_token_t{
			typ:        yaml_KEY_TOKEN,
			start_mark: simple_key.mark,
			end_mark:   simple_key.mark,
		}
		// Insert at the position the key started, not at the queue tail.
		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)

		// In the block context, we may need to add the BLOCK-MAPPING-START token.
		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
			simple_key.token_number,
			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
			return false
		}

		// Remove the simple key.
		simple_key.possible = false
		delete(parser.simple_keys_by_tok, simple_key.token_number)

		// A simple key cannot follow another simple key.
		parser.simple_key_allowed = false

	} else {
		// The ':' indicator follows a complex key.

		// In the block context, extra checks are required.
		if parser.flow_level == 0 {

			// Check if we are allowed to start a complex value.
			if !parser.simple_key_allowed {
				return yaml_parser_set_scanner_error(parser, "", parser.mark,
					"mapping values are not allowed in this context")
			}

			// Add the BLOCK-MAPPING-START token if needed.
			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
				return false
			}
		}

		// Simple keys after ':' are allowed in the block context.
		parser.simple_key_allowed = parser.flow_level == 0
	}

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the VALUE token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_VALUE_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
// Produce the SCALAR(...,plain) token.
func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
	// A plain scalar could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a plain scalar: the scalar itself may be
	// the key, but its continuation cannot start a new one.
	parser.simple_key_allowed = false

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_plain_scalar(parser, &token) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}
+
// Eat whitespaces and comments until the next token is found.
//
// Returns false only when the input buffer cannot be refilled (I/O or
// encoding error already recorded on the parser).
func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {

	// Until the next token is not found.
	for {
		// Allow the BOM mark to start a line.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
			skip(parser)
		}

		// Eat whitespaces.
		// Tabs are allowed:
		//  - in the flow context
		//  - in the block context, but not at the beginning of the line or
		//    after '-', '?', or ':' (complex value), i.e. not where a
		//    simple key would be allowed.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Eat a comment until a line break.
		if parser.buffer[parser.buffer_pos] == '#' {
			for !is_breakz(parser.buffer, parser.buffer_pos) {
				skip(parser)
				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
					return false
				}
			}
		}

		// If it is a line break, eat it.
		if is_break(parser.buffer, parser.buffer_pos) {
			// Need two bytes buffered so skip_line can handle a CRLF pair.
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
			skip_line(parser)

			// In the block context, a new line may start a simple key.
			if parser.flow_level == 0 {
				parser.simple_key_allowed = true
			}
		} else {
			break // We have found a token.
		}
	}

	return true
}
+
// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
//
// Scope:
//      %YAML    1.1    # a comment \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
// The directive name decides which token is produced; an unknown name is
// a scanner error.
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Eat '%'.
	start_mark := parser.mark
	skip(parser)

	// Scan the directive name.
	var name []byte
	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
		return false
	}

	// Is it a YAML directive?
	if bytes.Equal(name, []byte("YAML")) {
		// Scan the VERSION directive value.
		var major, minor int8
		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
			return false
		}
		end_mark := parser.mark

		// Create a VERSION-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			major:      major,
			minor:      minor,
		}

		// Is it a TAG directive?
	} else if bytes.Equal(name, []byte("TAG")) {
		// Scan the TAG directive value.
		var handle, prefix []byte
		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
			return false
		}
		end_mark := parser.mark

		// Create a TAG-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_TAG_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			value:      handle,
			prefix:     prefix,
		}

		// Unknown directive.
	} else {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unknown directive name")
		return false
	}

	// Eat the rest of the line including any comments.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	if parser.buffer[parser.buffer_pos] == '#' {
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
	}

	// Check if we are at the end of the line.
	if !is_breakz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "did not find expected comment or line break")
		return false
	}

	// Eat a line break.
	if is_break(parser.buffer, parser.buffer_pos) {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		skip_line(parser)
	}

	return true
}
+
// Scan the directive name.
//
// Scope:
//      %YAML   1.1     # a comment \n
//       ^^^^
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//       ^^^
//
// On success *name holds the scanned name (e.g. "YAML" or "TAG").
func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
	// Consume the directive name.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	var s []byte
	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the name is empty.
	if len(s) == 0 {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "could not find expected directive name")
		return false
	}

	// Check for a blank character after the name.
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unexpected non-alphabetical character")
		return false
	}
	*name = s
	return true
}
+
// Scan the value of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//           ^^^^^^
//
// Scans "<major>.<minor>" into *major and *minor.
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Consume the major version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
		return false
	}

	// Eat '.'.
	if parser.buffer[parser.buffer_pos] != '.' {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected digit or '.' character")
	}

	skip(parser)

	// Consume the minor version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
		return false
	}
	return true
}
+
// Maximum number of digits accepted in a %YAML version component.
const max_number_length = 2

// Scan the version number of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//              ^
//      %YAML   1.1     # a comment \n
//                ^
//
// Reads a run of digits into *number; at least one digit must be present
// and at most max_number_length digits are accepted.
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {

	// Repeat while the next character is digit.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var value, length int8
	for is_digit(parser.buffer, parser.buffer_pos) {
		// Check if the number is too long.
		length++
		if length > max_number_length {
			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
				start_mark, "found extremely long version number")
		}
		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the number was present.
	if length == 0 {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected version number")
	}
	*number = value
	return true
}
+
// Scan the value of a TAG-DIRECTIVE token.
//
// Scope:
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
// On success *handle holds the tag handle (e.g. "!yaml!") and *prefix the
// tag prefix URI.
func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
	var handle_value, prefix_value []byte

	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a handle.
	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
		return false
	}

	// Expect a whitespace.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blank(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace")
		return false
	}

	// Eat whitespaces.
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a prefix.
	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
		return false
	}

	// Expect a whitespace or line break.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace or line break")
		return false
	}

	// Only publish the results once both parts scanned successfully.
	*handle = handle_value
	*prefix = prefix_value
	return true
}
+
// Scan an ANCHOR ('&name') or ALIAS ('*name') token into *token; typ selects
// which token type is produced and which error context is reported.
func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
	var s []byte

	// Eat the indicator character.
	start_mark := parser.mark
	skip(parser)

	// Consume the value.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	end_mark := parser.mark

	// Check if length of the anchor is greater than 0 and it is followed by
	// a whitespace character or one of the indicators:
	//
	//      '?', ':', ',', ']', '}', '%', '@', '`'.
	if len(s) == 0 ||
		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
			parser.buffer[parser.buffer_pos] == '`') {
		context := "while scanning an alias"
		if typ == yaml_ANCHOR_TOKEN {
			context = "while scanning an anchor"
		}
		yaml_parser_set_scanner_error(parser, context, start_mark,
			"did not find expected alphabetic or numeric character")
		return false
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
	}

	return true
}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
// Scan a tag handle.
//
// A handle is '!', '!!', or '!word!'.  When directive is true the handle is
// part of a %TAG directive, which tightens the error checking; otherwise a
// non-handle result is acceptable because it may be part of a tag URI.
func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
	// Check the initial '!' character.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if parser.buffer[parser.buffer_pos] != '!' {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected '!'")
		return false
	}

	var s []byte

	// Copy the '!' character.
	s = read(parser, s)

	// Copy all subsequent alphabetical and numerical characters.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the trailing character is '!' and copy it.
	if parser.buffer[parser.buffer_pos] == '!' {
		s = read(parser, s)
	} else {
		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
		// directive, it's an error.  If it's a tag token, it must be a part of URI.
		if directive && string(s) != "!" {
			yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find expected '!'")
			return false
		}
	}

	*handle = s
	return true
}
+
// Scan a tag URI.
//
// head, if non-empty, is a previously scanned fragment whose bytes (minus the
// leading '!') seed the result.  URI-escape sequences ('%XX') are decoded.
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
	//size_t length = head ? strlen((char *)head) : 0
	var s []byte
	hasTag := len(head) > 0

	// Copy the head if needed.
	//
	// Note that we don't copy the leading '!' character.
	if len(head) > 1 {
		s = append(s, head[1:]...)
	}

	// Scan the tag.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	// The set of characters that may appear in URI is as follows:
	//
	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
	//      '%'.
	// [Go] Convert this into more reasonable logic.
	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
		parser.buffer[parser.buffer_pos] == '%' {
		// Check if it is a URI-escape sequence.
		if parser.buffer[parser.buffer_pos] == '%' {
			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
				return false
			}
		} else {
			s = read(parser, s)
		}
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		hasTag = true
	}

	// Either the head or at least one scanned character must be present.
	if !hasTag {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected tag URI")
		return false
	}
	*uri = s
	return true
}
+
// Decode an URI-escape sequence corresponding to a single UTF-8 character.
//
// Reads one or more '%XX' octets, validates that together they form a single
// well-formed UTF-8 sequence, and appends the raw bytes to *s.
func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {

	// Decode the required number of characters.
	// w starts as a sentinel (1024) meaning "leading octet not yet seen";
	// after the first octet it holds the remaining sequence length.
	w := 1024
	for w > 0 {
		// Check for a URI-escaped octet.
		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
			return false
		}

		if !(parser.buffer[parser.buffer_pos] == '%' &&
			is_hex(parser.buffer, parser.buffer_pos+1) &&
			is_hex(parser.buffer, parser.buffer_pos+2)) {
			return yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find URI escaped octet")
		}

		// Get the octet.
		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))

		// If it is the leading octet, determine the length of the UTF-8 sequence.
		if w == 1024 {
			w = width(octet)
			if w == 0 {
				return yaml_parser_set_scanner_tag_error(parser, directive,
					start_mark, "found an incorrect leading UTF-8 octet")
			}
		} else {
			// Check if the trailing octet is correct (10xxxxxx).
			if octet&0xC0 != 0x80 {
				return yaml_parser_set_scanner_tag_error(parser, directive,
					start_mark, "found an incorrect trailing UTF-8 octet")
			}
		}

		// Copy the octet and move the pointers past '%XX'.
		*s = append(*s, octet)
		skip(parser)
		skip(parser)
		skip(parser)
		w--
	}
	return true
}
+
// Scan a block scalar.
//
// Handles the '|' (literal) and '>' (folded) block scalar styles, including
// the optional chomping ('+'/'-') and explicit indentation indicators that
// may follow the style indicator in either order.
func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
	// Eat the indicator '|' or '>'.
	start_mark := parser.mark
	skip(parser)

	// Scan the additional block scalar indicators.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	// Check for a chomping indicator.
	// chomping: -1 strip, 0 clip (default), +1 keep.
	var chomping, increment int
	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
		// Set the chomping method and eat the indicator.
		if parser.buffer[parser.buffer_pos] == '+' {
			chomping = +1
		} else {
			chomping = -1
		}
		skip(parser)

		// Check for an indentation indicator.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if is_digit(parser.buffer, parser.buffer_pos) {
			// Check that the indentation is greater than 0.
			if parser.buffer[parser.buffer_pos] == '0' {
				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
					start_mark, "found an indentation indicator equal to 0")
				return false
			}

			// Get the indentation level and eat the indicator.
			increment = as_digit(parser.buffer, parser.buffer_pos)
			skip(parser)
		}

	} else if is_digit(parser.buffer, parser.buffer_pos) {
		// Do the same as above, but in the opposite order.

		if parser.buffer[parser.buffer_pos] == '0' {
			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found an indentation indicator equal to 0")
			return false
		}
		increment = as_digit(parser.buffer, parser.buffer_pos)
		skip(parser)

		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
			if parser.buffer[parser.buffer_pos] == '+' {
				chomping = +1
			} else {
				chomping = -1
			}
			skip(parser)
		}
	}

	// Eat whitespaces and comments to the end of the line.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}
	if parser.buffer[parser.buffer_pos] == '#' {
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
	}

	// Check if we are at the end of the line.
	if !is_breakz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
			start_mark, "did not find expected comment or line break")
		return false
	}

	// Eat a line break.
	if is_break(parser.buffer, parser.buffer_pos) {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		skip_line(parser)
	}

	end_mark := parser.mark

	// Set the indentation level if it was specified.
	var indent int
	if increment > 0 {
		if parser.indent >= 0 {
			indent = parser.indent + increment
		} else {
			indent = increment
		}
	}

	// Scan the leading line breaks and determine the indentation level if needed.
	var s, leading_break, trailing_breaks []byte
	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
		return false
	}

	// Scan the block scalar content.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var leading_blank, trailing_blank bool
	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
		// We are at the beginning of a non-empty line.

		// Is it a trailing whitespace?
		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Check if we need to fold the leading line break.
		// Folding only applies to '>' scalars between two non-blank lines.
		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
			// Do we need to join the lines by space?
			if len(trailing_breaks) == 0 {
				s = append(s, ' ')
			}
		} else {
			s = append(s, leading_break...)
		}
		leading_break = leading_break[:0]

		// Append the remaining line breaks.
		s = append(s, trailing_breaks...)
		trailing_breaks = trailing_breaks[:0]

		// Is it a leading whitespace?
		leading_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Consume the current line.
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			s = read(parser, s)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}

		leading_break = read_line(parser, leading_break)

		// Eat the following indentation spaces and line breaks.
		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
			return false
		}
	}

	// Chomp the tail: keep the final break unless stripping, and keep the
	// trailing empty lines only when keeping.
	if chomping != -1 {
		s = append(s, leading_break...)
	}
	if chomping == 1 {
		s = append(s, trailing_breaks...)
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_LITERAL_SCALAR_STYLE,
	}
	if !literal {
		token.style = yaml_FOLDED_SCALAR_STYLE
	}
	return true
}
+
// Scan indentation spaces and line breaks for a block scalar.  Determine the
// indentation level if needed (*indent == 0 means "not yet determined"; it is
// then set from the deepest indentation seen, bounded below by the enclosing
// block indent and by 1).
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
	*end_mark = parser.mark

	// Eat the indentation spaces and line breaks.
	max_indent := 0
	for {
		// Eat the indentation spaces.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
		if parser.mark.column > max_indent {
			max_indent = parser.mark.column
		}

		// Check for a tab character messing the indentation.
		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found a tab character where an indentation space is expected")
		}

		// Have we found a non-empty line?
		if !is_break(parser.buffer, parser.buffer_pos) {
			break
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		// [Go] Should really be returning breaks instead.
		*breaks = read_line(parser, *breaks)
		*end_mark = parser.mark
	}

	// Determine the indentation level if needed.
	if *indent == 0 {
		*indent = max_indent
		if *indent < parser.indent+1 {
			*indent = parser.indent + 1
		}
		if *indent < 1 {
			*indent = 1
		}
	}
	return true
}
+
// Scan a quoted scalar.
//
// Handles both single-quoted (single == true) and double-quoted scalars.
// Double-quoted scalars additionally support backslash escape sequences,
// including \x/\u/\U numeric escapes, and escaped line breaks; both styles
// fold unescaped line breaks per the YAML folding rules.
func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
	// Eat the left quote.
	start_mark := parser.mark
	skip(parser)

	// Consume the content of the quoted scalar.
	var s, leading_break, trailing_breaks, whitespaces []byte
	for {
		// Check that there are no document indicators at the beginning of the line.
		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
			return false
		}

		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos+0] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos+0] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz(parser.buffer, parser.buffer_pos+3) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected document indicator")
			return false
		}

		// Check for EOF.
		if is_z(parser.buffer, parser.buffer_pos) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected end of stream")
			return false
		}

		// Consume non-blank characters.
		leading_blanks := false
		for !is_blankz(parser.buffer, parser.buffer_pos) {
			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
				// It is an escaped single quote ('').
				s = append(s, '\'')
				skip(parser)
				skip(parser)

			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
				// It is a right single quote.
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
				// It is a right double quote.
				break

			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
				// It is an escaped line break.
				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
					return false
				}
				skip(parser)
				skip_line(parser)
				leading_blanks = true
				break

			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
				// It is an escape sequence.
				code_length := 0

				// Check the escape character.
				switch parser.buffer[parser.buffer_pos+1] {
				case '0':
					s = append(s, 0)
				case 'a':
					s = append(s, '\x07')
				case 'b':
					s = append(s, '\x08')
				case 't', '\t':
					s = append(s, '\x09')
				case 'n':
					s = append(s, '\x0A')
				case 'v':
					s = append(s, '\x0B')
				case 'f':
					s = append(s, '\x0C')
				case 'r':
					s = append(s, '\x0D')
				case 'e':
					s = append(s, '\x1B')
				case ' ':
					s = append(s, '\x20')
				case '"':
					s = append(s, '"')
				case '\'':
					s = append(s, '\'')
				case '\\':
					s = append(s, '\\')
				case 'N': // NEL (#x85)
					s = append(s, '\xC2')
					s = append(s, '\x85')
				case '_': // #xA0
					s = append(s, '\xC2')
					s = append(s, '\xA0')
				case 'L': // LS (#x2028)
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA8')
				case 'P': // PS (#x2029)
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA9')
				case 'x':
					code_length = 2
				case 'u':
					code_length = 4
				case 'U':
					code_length = 8
				default:
					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
						start_mark, "found unknown escape character")
					return false
				}

				// Eat the backslash and the escape character.
				skip(parser)
				skip(parser)

				// Consume an arbitrary escape code (\x, \u or \U).
				if code_length > 0 {
					var value int

					// Scan the character value.
					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
						return false
					}
					for k := 0; k < code_length; k++ {
						if !is_hex(parser.buffer, parser.buffer_pos+k) {
							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
								start_mark, "did not find expected hexdecimal number")
							return false
						}
						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
					}

					// Check the value and write the character.
					// Surrogates and values above U+10FFFF are invalid.
					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
							start_mark, "found invalid Unicode character escape code")
						return false
					}
					// Encode the code point as UTF-8.
					if value <= 0x7F {
						s = append(s, byte(value))
					} else if value <= 0x7FF {
						s = append(s, byte(0xC0+(value>>6)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else if value <= 0xFFFF {
						s = append(s, byte(0xE0+(value>>12)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else {
						s = append(s, byte(0xF0+(value>>18)))
						s = append(s, byte(0x80+((value>>12)&0x3F)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					}

					// Advance the pointer past the hex digits.
					for k := 0; k < code_length; k++ {
						skip(parser)
					}
				}
			} else {
				// It is a non-escaped non-blank character.
				s = read(parser, s)
			}
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
		}

		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		// Check if we are at the end of the scalar.
		if single {
			if parser.buffer[parser.buffer_pos] == '\'' {
				break
			}
		} else {
			if parser.buffer[parser.buffer_pos] == '"' {
				break
			}
		}

		// Consume blank characters.
		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer, parser.buffer_pos) {
				// Consume a space or a tab character.
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
					return false
				}

				// Check if it is a first line break.
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Join the whitespaces or fold line breaks.
		if leading_blanks {
			// Do we need to fold line breaks?
			if len(leading_break) > 0 && leading_break[0] == '\n' {
				if len(trailing_breaks) == 0 {
					s = append(s, ' ')
				} else {
					s = append(s, trailing_breaks...)
				}
			} else {
				s = append(s, leading_break...)
				s = append(s, trailing_breaks...)
			}
			trailing_breaks = trailing_breaks[:0]
			leading_break = leading_break[:0]
		} else {
			s = append(s, whitespaces...)
			whitespaces = whitespaces[:0]
		}
	}

	// Eat the right quote.
	skip(parser)
	end_mark := parser.mark

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
	}
	if !single {
		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	return true
}
+
+// Scan a plain scalar.
+//
+// Consumes an unquoted scalar starting at parser.mark and fills in a
+// yaml_SCALAR_TOKEN. Line breaks are folded per the YAML spec: a single
+// break becomes one space, while additional breaks are kept literally.
+// Returns false (with the parser error set) on a reader/scanner failure.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	// s accumulates the scalar value; leading_break/trailing_breaks and
+	// whitespaces buffer pending breaks/blanks until we know whether more
+	// content follows (and they must be folded in) or not.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator ("---" or "..." at column 0).
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar:
+			// ':' before a blank always ends it; flow indicators end it
+			// only inside a flow collection (flow_level > 0).
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							// A single break folds to one space.
+							s = append(s, ' ')
+						} else {
+							// Additional breaks are kept literally.
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character. Blanks that follow
+				// a break are indentation and are skipped, not buffered.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					// Pending blanks before a break are discarded.
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level (block context only).
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..4c45e66
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+// keyList orders map keys so that encoded YAML output is deterministic.
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+
+// Less orders keys: numeric/bool keys compare by value (ties broken by
+// kind, then exact value); mixed kinds compare by reflect.Kind; strings
+// compare in a "natural" order where embedded digit runs compare as
+// numbers rather than character by character.
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	// Unwrap non-nil interfaces and pointers to the underlying value.
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		// Same float value and kind: compare exactly to break
+		// float64-precision ties.
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	// Natural string comparison over runes.
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			// Non-letters sort before letters.
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			// Look back over preceding digits (both strings are equal up
+			// to index i, so scanning ar suffices); if a non-zero digit
+			// precedes, seed the accumulators so the runs below are not
+			// treated as numbers with leading zeros.
+			// NOTE(review): seeding both to 1 only affects the ai != bi
+			// tie-break below — confirm against upstream tests.
+			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		// Accumulate the numeric value of each digit run.
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			// Equal value but different run lengths (leading zeros):
+			// the shorter run sorts first.
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	// One string is a prefix of the other: shorter sorts first.
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+// Bools map to 1 (true) and 0 (false) so they participate in the same
+// numeric ordering as ints/uints/floats.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+// Panics on non-numeric, non-bool kinds; callers guard via keyFloat.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		// false < true.
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..a2dde60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+// The false return lets callers propagate the failure with a single
+// `return yaml_emitter_set_writer_error(...)`.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+// Writes any buffered bytes through the emitter's write_handler and
+// resets the buffer position; returns false with a writer error set if
+// the handler fails. Panics if no write handler was installed (a
+// programmer error, not a runtime condition).
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..89650e2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// Decode returns io.EOF when there are no more documents in the stream.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	// Internal failures are raised as yamlError panics (see fail/failf);
+	// handleErr converts them into the returned err.
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		// Type mismatches are accumulated, not fatal: report them all.
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// unmarshal decodes the first document in `in` into out; it is the shared
+// backend of Unmarshal and UnmarshalStrict.
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	// Encoding failures are raised as yamlError panics and converted to
+	// the returned err by handleErr.
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+// handleErr recovers a yamlError panic (raised via fail/failf) and stores
+// its wrapped error in *err; any other panic value is re-raised untouched.
+// It must be installed with defer in every exported entry point.
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+// yamlError wraps an error so handleErr can distinguish package-internal
+// failures from foreign panics.
+type yamlError struct {
+	err error
+}
+
+// fail aborts the current operation with err; recovered by handleErr.
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+// failf aborts the current operation with a formatted "yaml: ..." error;
+// recovered by handleErr.
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+// Error joins all accumulated decode errors into a single message.
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo // Fields indexed by YAML key.
+	FieldsList []fieldInfo          // Fields in declaration/discovery order.
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+// fieldInfo describes how one struct field is (un)marshaled.
+type fieldInfo struct {
+	Key       string // YAML key: the tag name, or the lowercased field name.
+	Num       int    // Field index within the (outermost) struct.
+	OmitEmpty bool   // ",omitempty" flag.
+	Flow      bool   // ",flow" flag.
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+// structMap caches computed structInfo values per struct type; reads and
+// writes are guarded by fieldMapMutex.
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+// getStructInfo computes (and caches) the YAML field layout of struct type
+// st: key names, flags parsed from `yaml:"..."` tags, and inlined fields.
+// It returns an error for unsupported tag flags, duplicated keys, or
+// invalid ,inline targets.
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	// Fast path: return the cached layout if we have computed it before.
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		// Legacy convenience: a struct tag without any colon is treated
+		// as the yaml tag itself.
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		// Parse comma-separated flags following the key.
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					// NOTE(review): fmt.Errorf would be idiomatic here
+					// (staticcheck S1028); kept as-is to match upstream.
+					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				// At most one ,inline map, and it must have string keys.
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				// Merge the inlined struct's fields into this one,
+				// recording the path to each via the Inline index chain.
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					finfo.Id = len(fieldsList)
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		// Default key is the lowercased field name.
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
+
+	// Publish to the cache. Concurrent computation for the same type can
+	// race benignly here: last writer wins with an equivalent value.
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
+// isZero reports whether v counts as "empty" for the omitempty flag:
+// types implementing IsZeroer are asked directly; otherwise zero-length
+// strings/slices/maps, nil pointers/interfaces, zero numbers, false
+// bools, and structs whose exported fields are all zero.
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		// Guard: invoking IsZero on a nil pointer/interface would panic.
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		// A struct is zero when every exported field is zero.
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..f6a9c8e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data (e.g. "%YAML 1.1").
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data (e.g. "%TAG !e! tag:example.com,2000:").
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position within the input/output stream.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+// yaml_style_t is the shared base for the scalar/sequence/mapping style
+// enumerations below.
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+// String returns the constant's name, for debugging and error messages.
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// eventStrings maps event types to human-readable names used by String.
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+// String returns a human-readable name for the event type.
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+// Typed views of the generic style field for each event kind.
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+// NOTE(review): the index parameter i is unused — this always inspects
+// b[0], b[1], b[2] (the buffer start), presumably because callers only
+// invoke it at position 0; verify against call sites before changing.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 0000000..41de634
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,16 @@
+# github.com/aead/cmac v0.0.0-20160719120800-7af84192f0b1
+github.com/aead/cmac/aes
+github.com/aead/cmac
+# github.com/davecgh/go-spew v1.1.1
+github.com/davecgh/go-spew/spew
+# github.com/deckarep/golang-set v1.7.1
+github.com/deckarep/golang-set
+# github.com/google/gopacket v1.1.17
+github.com/google/gopacket
+github.com/google/gopacket/layers
+# github.com/pmezard/go-difflib v1.0.0
+github.com/pmezard/go-difflib/difflib
+# github.com/stretchr/testify v1.5.1
+github.com/stretchr/testify/assert
+# gopkg.in/yaml.v2 v2.2.8
+gopkg.in/yaml.v2